Deploying an ELK 7.17.5 Cluster on Kubernetes and Configuring X-Pack Authentication
1. System architecture
IP | Role | Hostname | Pods
10.0.0.231 | Master | k8s231.tom.com | none
10.0.0.232 | Node | k8s232.tom.com | elasticsearch, zookeeper, kafka, filebeat, logstash
10.0.0.233 | Node | k8s233.tom.com | elasticsearch, zookeeper, kafka, filebeat, kibana
10.0.0.234 | Node | k8s234.tom.com | elasticsearch, zookeeper, kafka, filebeat, efak
2. Prepare the working directories on the master node
[root@k8s231 ]# mkdir -p /script/k8s/elk/{namespace,elasticsearch,filebeat,logstash,kibana,zookeeper,kafka,efak}
3. Create the namespace
[root@k8s231 ]# cd /script/k8s/elk/namespace
[root@k8s231 namespace]# cat > namespace.yaml <<'EOF'
apiVersion: v1
kind: Namespace
metadata:
name: elk
labels:
app: elasticsearch
EOF
[root@k8s231 namespace]# kubectl create -f namespace.yaml
namespace/elk created
4. Create the Elasticsearch cluster
4.1. Create the certificate needed to enable X-Pack security
#Start a temporary ES container and generate elastic-certificates.p12
[root@k8s231 elasticsearch]# docker run --name elastic-certs -i -w /tmp harbor.tom.com/project/elasticsearch:7.17.5 /bin/sh -c "elasticsearch-certutil ca --out /tmp/es-ca.p12 --pass '' \
&& elasticsearch-certutil cert --name security-master \
--dns security-master --ca /tmp/es-ca.p12 --pass '' --ca-pass '' \
--out /tmp/elastic-certificates.p12"
#Copy the generated certificate out of the container for the cluster to use
[root@k8s231 elasticsearch]# docker cp elastic-certs:/tmp/elastic-certificates.p12 ./
#Remove the temporary container, which is no longer needed
[root@k8s231 elasticsearch]# docker rm elastic-certs
#Create a secret from the certificate; each ES node will mount it
[root@k8s231 elasticsearch]# kubectl -n elk create secret generic elastic-certificates --from-file=./elastic-certificates.p12
secret/elastic-certificates created
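To confirm the certificate was stored correctly, the secret can be inspected (an extra check, not part of the original steps; output omitted):
#elastic-certificates.p12 should be listed under Data
kubectl -n elk describe secret elastic-certificates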
4.2. Create a StorageClass used for the PVs
[root@k8s231 elasticsearch]# cat > es-storageclass.yaml << 'EOF'
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: es-sc
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
EOF
[root@k8s231 elasticsearch]# kubectl create -f es-storageclass.yaml
storageclass.storage.k8s.io/es-sc created
4.3. Create 3 PVs for ES data storage
[root@k8s231 elasticsearch]# cat > es-pv.yaml <<EOF
apiVersion: v1
kind: PersistentVolume
metadata:
name: es-pv-0
namespace: elk
labels:
name: es-pv-0
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: es-sc
#local:
# path: /data/elastic
hostPath:
path: /data/elastic
type: DirectoryOrCreate
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- k8s232.tom.com
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: es-pv-1
namespace: elk
labels:
name: es-pv-1
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: es-sc
#local:
# path: /data/elastic
hostPath:
path: /data/elastic
type: DirectoryOrCreate
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- k8s233.tom.com
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: es-pv-2
namespace: elk
labels:
name: es-pv-2
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: es-sc
#local:
# path: /data/elastic
hostPath:
path: /data/elastic
type: DirectoryOrCreate
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- k8s234.tom.com
EOF
[root@k8s231 elasticsearch]# kubectl apply -f es-pv.yaml
persistentvolume/es-pv-0 created
persistentvolume/es-pv-1 created
persistentvolume/es-pv-2 created
[root@k8s231 elasticsearch]# kubectl -n elk get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
es-pv-0 10Gi RWO Retain Available es-sc 2m29s
es-pv-1 10Gi RWO Retain Available es-sc 2m29s
es-pv-2 10Gi RWO Retain Available es-sc 2m29s
4.4. Create the ES headless service (service discovery; ES nodes find each other through it)
[root@k8s231 elasticsearch]# cat > es-svc-headless.yaml <<'EOF'
apiVersion: v1
kind: Service
metadata:
namespace: elk
name: es-svc-headless
labels:
app: es
spec:
selector:
app: es
clusterIP: None
ports:
- port: 9200
targetPort: 9200
EOF
[root@k8s231 elasticsearch]# kubectl create -f es-svc-headless.yaml
service/es-svc-headless created
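As an optional sanity check not in the original steps, the headless service should resolve to one A record per ES Pod once the StatefulSet from the next step is running; a throwaway Pod can verify this (the busybox image and tag here are assumptions and must be pullable in your environment):
#resolve the headless service from inside the cluster; expect the three es-sts Pod IPs
kubectl -n elk run dns-test --rm -it --restart=Never --image=busybox:1.36 -- nslookup es-svc-headless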
4.5. Create the Elasticsearch StatefulSet
[root@k8s231 elasticsearch]# cat > es-statefulset.yaml <<'EOF'
apiVersion: apps/v1
kind: StatefulSet
metadata:
namespace: elk
name: es-sts
labels:
app: es
spec:
serviceName: es-svc-headless
replicas: 3
selector:
matchLabels:
app: es
template:
metadata:
labels:
app: es
spec:
containers:
- name: elasticsearch
image: harbor.tom.com/project/elasticsearch:7.17.5
command: ["bash", "-c", "ulimit -l unlimited && sysctl -w vm.max_map_count=262144 && chown -R elasticsearch:elasticsearch /usr/share/elasticsearch/data && exec su elasticsearch docker-entrypoint.sh"]
ports:
- containerPort: 9200
name: http
- containerPort: 9300
name: transport
env:
- name: discovery.seed_hosts
value: "es-svc-headless"
#value: "es-sts-0.es-svc-headless,es-sts-1.es-svc-headless,es-sts-2.es-svc-headless"
- name: cluster.initial_master_nodes
#value: "es-svc-headless"
value: "es-sts-0,es-sts-1,es-sts-2"
- name: ES_JAVA_OPTS
value: -Xms512m -Xmx512m
- name: node.master
value: "true"
- name: node.ingest
value: "true"
- name: node.data
value: "true"
- name: cluster.name
value: "elasticsearch"
- name: node.name
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: xpack.security.enabled
value: "true"
- name: xpack.security.transport.ssl.enabled
value: "true"
- name: xpack.monitoring.collection.enabled
value: "true"
- name: xpack.security.transport.ssl.verification_mode
value: "certificate"
- name: xpack.security.transport.ssl.keystore.path
value: "/usr/share/elasticsearch/config/elastic-certificates.p12"
- name: xpack.security.transport.ssl.truststore.path
value: "/usr/share/elasticsearch/config/elastic-certificates.p12"
volumeMounts:
- name: es-pvc
mountPath: /usr/share/elasticsearch/data
- name: elastic-certificates
readOnly: true
mountPath: "/usr/share/elasticsearch/config/elastic-certificates.p12"
subPath: elastic-certificates.p12
- name: localtime
mountPath: /etc/localtime
securityContext:
privileged: true
volumes:
#Mount the TLS certificate
- name: elastic-certificates
secret:
secretName: elastic-certificates
#Use the host's time zone
- name: localtime
hostPath:
path: /etc/localtime
volumeClaimTemplates:
- metadata:
name: es-pvc
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "es-sc"
resources:
requests:
storage: 10Gi
EOF
[root@k8s231 elasticsearch]# kubectl create -f es-statefulset.yaml
statefulset.apps/es-sts created
[root@k8s231 elasticsearch]# kubectl -n elk get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
es-sts-0 1/1 Running 0 3m59s 10.200.2.58 k8s234.tom.com <none> <none>
es-sts-1 1/1 Running 0 3m58s 10.200.3.222 k8s233.tom.com <none> <none>
es-sts-2 1/1 Running 0 3m57s 10.200.1.51 k8s232.tom.com <none> <none>
4.6. Configure the ES cluster passwords
Option 1: auto-generate random passwords for the built-in users and print them to the terminal
[root@k8s231 elasticsearch]# kubectl -n elk exec -it es-sts-0 -- bin/elasticsearch-setup-passwords auto -b
Option 2: set the passwords interactively; for convenience in this walkthrough the password is set to Aa123456
[root@k8s231 elasticsearch]# kubectl -n elk exec -it es-sts-0 -- bin/elasticsearch-setup-passwords interactive
Note: choose one of the two methods and remember the password; it is needed later.
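The resource listing in step 4.7 also includes a PodDisruptionBudget named es-pdb that is not created anywhere in this article; a minimal sketch that would produce it, mirroring the zk-pdb defined in step 6.6, is:
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: es-pdb
  namespace: elk
spec:
  selector:
    matchLabels:
      app: es
  maxUnavailable: 1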
4.7. Check the ES cluster status
[root@k8s231 elasticsearch]# kubectl -n elk get pod,svc,pv,pvc,sc,pdb,sts -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod/es-sts-0 1/1 Running 0 12h 10.200.2.58 k8s234.tom.com <none> <none>
pod/es-sts-1 1/1 Running 0 12h 10.200.3.222 k8s233.tom.com <none> <none>
pod/es-sts-2 1/1 Running 0 12h 10.200.1.51 k8s232.tom.com <none> <none>
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/es-svc-headless ClusterIP None <none> 9200/TCP 12h app=es
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE
persistentvolume/es-pv-0 10Gi RWO Retain Bound elk/es-pvc-es-sts-2 es-sc 13h Filesystem
persistentvolume/es-pv-1 10Gi RWO Retain Bound elk/es-pvc-es-sts-1 es-sc 13h Filesystem
persistentvolume/es-pv-2 10Gi RWO Retain Bound elk/es-pvc-es-sts-0 es-sc 13h Filesystem
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE
persistentvolumeclaim/es-pvc-es-sts-0 Bound es-pv-2 10Gi RWO es-sc 12h Filesystem
persistentvolumeclaim/es-pvc-es-sts-1 Bound es-pv-1 10Gi RWO es-sc 12h Filesystem
persistentvolumeclaim/es-pvc-es-sts-2 Bound es-pv-0 10Gi RWO es-sc 12h Filesystem
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
storageclass.storage.k8s.io/es-sc kubernetes.io/no-provisioner Delete WaitForFirstConsumer false 14h
storageclass.storage.k8s.io/zk-sc kubernetes.io/no-provisioner Delete WaitForFirstConsumer false 8m9s
NAME MIN AVAILABLE MAX UNAVAILABLE ALLOWED DISRUPTIONS AGE
poddisruptionbudget.policy/es-pdb N/A 1 1 3m49s
NAME READY AGE CONTAINERS IMAGES
statefulset.apps/es-sts 3/3 12h elasticsearch harbor.tom.com/project/elasticsearch:7.17.5
[root@k8s231 elasticsearch]# curl -u elastic:Aa123456 `dig @10.100.0.10 es-svc-headless.elk.svc.tom.com +short| grep -m 1 '^[.0-9]*$'`:9200/_cat/nodes
10.200.1.51 74 26 1 0.29 0.46 0.47 cdfhilmrstw - es-sts-2
10.200.3.222 64 26 2 0.39 0.27 0.26 cdfhilmrstw * es-sts-1
10.200.2.58 27 26 1 0.25 0.26 0.29 cdfhilmrstw - es-sts-0
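Beyond _cat/nodes, overall cluster health can be checked too (an extra check, assuming curl is available inside the ES image):
#status should be green with 3 nodes
kubectl -n elk exec es-sts-0 -- curl -s -u elastic:Aa123456 http://localhost:9200/_cluster/health?pretty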
5. Deploy Kibana
5.1. Create a secret to store the ES password
[root@k8s231 elasticsearch]# cd /script/k8s/elk/kibana
[root@k8s231 kibana]# kubectl -n elk create secret generic es-pass --from-literal password=Aa123456
secret/es-pass created
5.2. Create a ConfigMap for the Kibana configuration file
[root@k8s231 kibana]# cat > kibana-configmap.yaml << 'EOF'
apiVersion: v1
kind: ConfigMap
metadata:
namespace: elk
name: kibana-config
labels:
app: kibana
data:
kibana.yml: |-
server.host: 0.0.0.0
elasticsearch:
hosts: ${ELASTICSEARCH_HOSTS}
username: ${ELASTICSEARCH_USER}
password: ${ELASTICSEARCH_PASSWORD}
i18n.locale: "zh-CN"
EOF
[root@k8s231 kibana]# kubectl create -f kibana-configmap.yaml
configmap/kibana-config created
5.3. Create the Kibana Deployment
[root@k8s231 kibana]# cat > kibana-deployment.yaml << 'EOF'
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: kibana
name: kibana
namespace: elk
spec:
replicas: 1
selector:
matchLabels:
app: kibana
template:
metadata:
labels:
app: kibana
spec:
containers:
- name: kibana
image: harbor.tom.com/project/kibana:7.17.5
ports:
- containerPort: 5601
protocol: TCP
env:
- name: SERVER_PUBLICBASEURL
value: "http://0.0.0.0:5601"
- name: ELASTICSEARCH_HOSTS
value: "http://es-svc-headless:9200"
- name: ELASTICSEARCH_USER
value: "elastic"
#value: "kibana_system"
- name: ELASTICSEARCH_PASSWORD
valueFrom:
secretKeyRef:
name: es-pass
key: password
- name: xpack.encryptedSavedObjects.encryptionKey
value: "min-32-byte-long-strong-encryption-key"
volumeMounts:
- name: kibana-config
mountPath: /usr/share/kibana/config/kibana.yml
readOnly: true
subPath: kibana.yml
- mountPath: /etc/localtime
name: localtime
volumes:
- name: kibana-config
configMap:
name: kibana-config
- name: localtime
hostPath:
path: /etc/localtime
EOF
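The apply step is not shown in the original; presumably the Deployment is created the same way as the other manifests:
kubectl create -f kibana-deployment.yaml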
#Kibana was scheduled onto node k8s232.tom.com
[root@k8s231 kibana]# kubectl -n elk get pod -o wide|grep kibana
kibana-8db5869b-zxg7m 1/1 Running 0 3m27s 10.200.1.55 k8s232.tom.com <none> <none>
5.4. Create a NodePort Service for Kibana for external access
[root@k8s231 kibana]# cat > kibana-svc.yaml <<'EOF'
kind: Service
apiVersion: v1
metadata:
labels:
app: kibana
name: kibana-svc
namespace: elk
spec:
ports:
- port: 5601
targetPort: 5601
nodePort: 30002
type: NodePort
selector:
app: kibana
EOF
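Likewise, apply the Service manifest before checking it:
kubectl create -f kibana-svc.yaml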
[root@k8s231 kibana]# kubectl -n elk get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
es-svc NodePort 10.100.40.6 <none> 9200:30000/TCP 38m
es-svc-headless ClusterIP None <none> 9200/TCP 53m
kibana-svc NodePort 10.100.220.95 <none> 5601:30002/TCP 10s
5.5. Test external access to Kibana
Kibana runs on node k8s232.tom.com and the NodePort is 30002, so browse to http://10.0.0.232:30002
Username: elastic  Password: Aa123456 (the password set for the ES cluster)
6. Deploy the ZooKeeper cluster
6.1. Create a StorageClass used for the PVs
[root@k8s231 ]# cd /script/k8s/elk/zookeeper
[root@k8s231 zookeeper]# cat > zk-storageclass.yaml << 'EOF'
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: zk-sc
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
EOF
[root@k8s231 zookeeper]# kubectl create -f zk-storageclass.yaml
storageclass.storage.k8s.io/zk-sc created
6.2. Create the 3 PVs used by ZooKeeper
[root@k8s231 zookeeper]# cat > zk-pv.yaml << 'EOF'
kind: PersistentVolume
apiVersion: v1
metadata:
name: zk-pv-0
namespace: elk
spec:
storageClassName: zk-sc
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
hostPath:
path: "/data/zookeeper" #此目录不需要自行创建,系统会自动创建
persistentVolumeReclaimPolicy: Recycle
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: zk-pv-1
namespace: elk
spec:
storageClassName: zk-sc
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
hostPath:
path: "/data/zookeeper"
persistentVolumeReclaimPolicy: Recycle
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: zk-pv-2
namespace: elk
spec:
storageClassName: zk-sc
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
hostPath:
path: "/data/zookeeper"
persistentVolumeReclaimPolicy: Recycle
EOF
[root@k8s231 zookeeper]# kubectl create -f zk-pv.yaml
persistentvolume/zk-pv-0 created
persistentvolume/zk-pv-1 created
persistentvolume/zk-pv-2 created
[root@k8s231 zookeeper]# kubectl get pv |grep zk|column -t
zk-pv-0 1Gi RWO Recycle Available zk-sc 9m2s
zk-pv-1 1Gi RWO Recycle Available zk-sc 9m2s
zk-pv-2 1Gi RWO Recycle Available zk-sc 9m2s
6.3. Create the ZooKeeper headless service (service discovery; zk nodes find each other through it)
[root@k8s231 zookeeper]# cat > zk-svc-headless.yaml <<'EOF'
apiVersion: v1
kind: Service
metadata:
name: zk-svc-headless
namespace: elk
labels:
app: zk
spec:
ports:
- port: 2888
name: server
- port: 3888
name: leader-election
clusterIP: None
selector:
app: zk
EOF
[root@k8s231 zookeeper]# kubectl create -f zk-svc-headless.yaml
service/zk-svc-headless created
[root@k8s231 zookeeper]# kubectl -n elk get svc|grep zk
zk-svc-headless ClusterIP None <none> 2888/TCP,3888/TCP 46s
6.4. Create the ZooKeeper StatefulSet
[root@k8s231 zookeeper]# cat > zk-statefulset.yaml <<'EOF'
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: zk-sts
namespace: elk
spec:
selector:
matchLabels:
app: zk
serviceName: zk-svc-headless
replicas: 3
updateStrategy:
type: RollingUpdate
podManagementPolicy: Parallel
template:
metadata:
labels:
app: zk
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "app"
operator: In
values:
- zk
topologyKey: "kubernetes.io/hostname"
containers:
- name: kubernetes-zookeeper
imagePullPolicy: Always
image: harbor.tom.com/project/kubernetes-zookeeper:1.0-3.4.10
resources:
requests:
memory: "1Gi"
cpu: "0.5"
ports:
- containerPort: 2181
name: client
- containerPort: 2888
name: server
- containerPort: 3888
name: leader-election
command:
- sh
- -c
- |
mkdir -p /var/lib/zookeeper/data
chown -R zookeeper. /var/lib/zookeeper/data
start-zookeeper \
--servers=3 \
--data_dir=/var/lib/zookeeper/data \
--data_log_dir=/var/lib/zookeeper/data/log \
--conf_dir=/opt/zookeeper/conf \
--client_port=2181 \
--election_port=3888 \
--server_port=2888 \
--tick_time=2000 \
--init_limit=10 \
--sync_limit=5 \
--heap=512M \
--max_client_cnxns=60 \
--snap_retain_count=3 \
--purge_interval=12 \
--max_session_timeout=40000 \
--min_session_timeout=4000 \
--log_level=INFO
readinessProbe:
exec:
command:
- sh
- -c
- "zookeeper-ready 2181"
initialDelaySeconds: 10
timeoutSeconds: 5
livenessProbe:
exec:
command:
- sh
- -c
- "zookeeper-ready 2181"
initialDelaySeconds: 10
timeoutSeconds: 5
volumeMounts:
- name: zk-pvc
mountPath: /var/lib/zookeeper
volumeClaimTemplates:
- metadata:
name: zk-pvc
spec:
storageClassName: zk-sc
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi
EOF
[root@k8s231 zookeeper]# kubectl create -f zk-statefulset.yaml
statefulset.apps/zk-sts created
[root@k8s231 zookeeper]# kubectl -n elk get pod -o wide|grep zk|column -t
zk-sts-0 1/1 Running 0 48s 10.200.2.73 k8s234.tom.com <none> <none>
zk-sts-1 1/1 Running 0 48s 10.200.3.237 k8s233.tom.com <none> <none>
zk-sts-2 1/1 Running 0 48s 10.200.1.67 k8s232.tom.com <none> <none>
6.5. Check the status of each ZooKeeper node
#Keep polling the node status until a follower and a leader appear
#Queries within the first ~2 minutes may report "Error contacting service"; with a correct configuration a leader appears within about 2 minutes
[root@k8s231 zookeeper]# while true;do for i in 0 1 2; do kubectl -n elk exec zk-sts-$i -- zkServer.sh status /opt/zookeeper/conf/zoo.cfg; done; sleep 5 ; done
Using config: /opt/zookeeper/conf/zoo.cfg
Error contacting service. It is probably not running.
command terminated with exit code 1
ZooKeeper JMX enabled by default
Using config: /opt/zookeeper/conf/zoo.cfg
Mode: follower
ZooKeeper JMX enabled by default
Using config: /opt/zookeeper/conf/zoo.cfg
Mode: follower
ZooKeeper JMX enabled by default
Using config: /opt/zookeeper/conf/zoo.cfg
Mode: leader
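As an optional extra check, writing a znode on one member and reading it back from another confirms replication across the ensemble (this assumes zkCli.sh is on the PATH in this image, as zkServer.sh is):
kubectl -n elk exec zk-sts-0 -- zkCli.sh create /smoke-test hello
kubectl -n elk exec zk-sts-1 -- zkCli.sh get /smoke-test
kubectl -n elk exec zk-sts-0 -- zkCli.sh delete /smoke-test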
6.6. Set up a PDB to keep the service available
A PodDisruptionBudget (PDB) is a Kubernetes resource object that ensures Pods are not unexpectedly disrupted or terminated during maintenance, upgrades, or other operations.
[root@k8s231 zookeeper]# cat > zk-pdb.yaml <<'EOF'
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: zk-pdb
namespace: elk
spec:
selector:
matchLabels:
app: zk
maxUnavailable: 1
EOF
[root@k8s231 zookeeper]# kubectl create -f zk-pdb.yaml
poddisruptionbudget.policy/zk-pdb created
[root@k8s231 zookeeper]# kubectl -n elk get pdb
NAME MIN AVAILABLE MAX UNAVAILABLE ALLOWED DISRUPTIONS AGE
zk-pdb 2 N/A 1 49s
7. Deploy the Kafka cluster
7.1. Create a StorageClass used for the PVs
[root@k8s231 ]# cd /script/k8s/elk/kafka
[root@k8s231 kafka]# cat > kafka-storageclass.yaml <<'EOF'
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: kafka-sc
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
EOF
[root@k8s231 kafka]# kubectl create -f kafka-storageclass.yaml
storageclass.storage.k8s.io/kafka-sc created
[root@k8s231 kafka]# kubectl -n elk get sc|grep kafka|column -t
kafka-sc kubernetes.io/no-provisioner Delete WaitForFirstConsumer false 77s
7.2. Create the 3 PVs used by Kafka
[root@k8s231 kafka]# cat > kafka-pv.yaml <<'EOF'
apiVersion: v1
kind: PersistentVolume
metadata:
name: kafka-pv-0
namespace: elk
labels:
name: kafka-pv-0
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: kafka-sc
hostPath:
path: /data/kafka
type: DirectoryOrCreate
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- k8s232.tom.com
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: kafka-pv-1
namespace: elk
labels:
name: kafka-pv-1
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: kafka-sc
hostPath:
path: /data/kafka
type: DirectoryOrCreate
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- k8s233.tom.com
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: kafka-pv-2
namespace: elk
labels:
name: kafka-pv-2
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: kafka-sc
hostPath:
path: /data/kafka
type: DirectoryOrCreate
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- k8s234.tom.com
EOF
[root@k8s231 kafka]# kubectl create -f kafka-pv.yaml
persistentvolume/kafka-pv-0 created
persistentvolume/kafka-pv-1 created
persistentvolume/kafka-pv-2 created
[root@k8s231 kafka]# kubectl -n elk get pv|grep kafka|column -t
kafka-pv-0 10Gi RWO Retain Available kafka-sc 14s
kafka-pv-1 10Gi RWO Retain Available kafka-sc 14s
kafka-pv-2 10Gi RWO Retain Available kafka-sc 14s
7.3. Create the Kafka NodePort Service (optional if external access is not needed)
[root@k8s231 kafka]# cat > kafka-svc.yaml <<'EOF'
apiVersion: v1
kind: Service
metadata:
name: kafka-svc
namespace: elk
labels:
app: kafka
spec:
type: NodePort
ports:
- port: 9092
nodePort: 30001
targetPort: 9092
selector:
app: kafka
EOF
[root@k8s231 kafka]# kubectl create -f kafka-svc.yaml
service/kafka-svc created
[root@k8s231 kafka]# kubectl -n elk get svc|grep kafka|column -t
kafka-svc NodePort 10.100.253.67 <none> 9092:30001/TCP 25s
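Note: the StatefulSet in the next step sets serviceName: kafka-svc-headless, and Filebeat/Logstash later connect to kafka-svc-headless:9092, but that headless Service is never defined in this article. A minimal sketch that would satisfy those references (an assumption, modeled on the zk headless service) is:
apiVersion: v1
kind: Service
metadata:
  name: kafka-svc-headless
  namespace: elk
  labels:
    app: kafka
spec:
  clusterIP: None
  ports:
  - port: 9092
    name: server
  selector:
    app: kafka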
7.4. Create the Kafka StatefulSet
[root@k8s231 kafka]# cat > kafka-statefulset.yaml <<'EOF'
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: kafka-sts
namespace: elk
spec:
selector:
matchLabels:
app: kafka
serviceName: kafka-svc-headless
replicas: 3
template:
metadata:
labels:
app: kafka
spec:
terminationGracePeriodSeconds: 300
containers:
- name: kafka
imagePullPolicy: IfNotPresent
image: harbor.tom.com/project/skafka:2.2.0
#image: fastop/kafka:2.2.0
resources:
requests:
memory: "600Mi"
cpu: 500m
ports:
- containerPort: 9092
name: server
command:
- sh
- -c
- "exec kafka-server-start.sh /opt/kafka/config/server.properties --override broker.id=${HOSTNAME##*-} \
--override listeners=PLAINTEXT://:9092 \
--override zookeeper.connect=zk-svc-headless:2181 \
--override log.dir=/var/lib/kafka \
--override auto.create.topics.enable=true \
--override auto.leader.rebalance.enable=true \
--override background.threads=10 \
--override compression.type=producer \
--override delete.topic.enable=false \
--override leader.imbalance.check.interval.seconds=300 \
--override leader.imbalance.per.broker.percentage=10 \
--override log.flush.interval.messages=9223372036854775807 \
--override log.flush.offset.checkpoint.interval.ms=60000 \
--override log.flush.scheduler.interval.ms=9223372036854775807 \
--override log.retention.bytes=-1 \
--override log.retention.hours=168 \
--override log.roll.hours=168 \
--override log.roll.jitter.hours=0 \
--override log.segment.bytes=1073741824 \
--override log.segment.delete.delay.ms=60000 \
--override message.max.bytes=1000012 \
--override min.insync.replicas=1 \
--override num.io.threads=8 \
--override num.network.threads=3 \
--override num.recovery.threads.per.data.dir=1 \
--override num.replica.fetchers=1 \
--override offset.metadata.max.bytes=4096 \
--override offsets.commit.required.acks=-1 \
--override offsets.commit.timeout.ms=5000 \
--override offsets.load.buffer.size=5242880 \
--override offsets.retention.check.interval.ms=600000 \
--override offsets.retention.minutes=1440 \
--override offsets.topic.compression.codec=0 \
--override offsets.topic.num.partitions=50 \
--override offsets.topic.replication.factor=3 \
--override offsets.topic.segment.bytes=104857600 \
--override queued.max.requests=500 \
--override quota.consumer.default=9223372036854775807 \
--override quota.producer.default=9223372036854775807 \
--override replica.fetch.min.bytes=1 \
--override replica.fetch.wait.max.ms=500 \
--override replica.high.watermark.checkpoint.interval.ms=5000 \
--override replica.lag.time.max.ms=10000 \
--override replica.socket.receive.buffer.bytes=65536 \
--override replica.socket.timeout.ms=30000 \
--override request.timeout.ms=30000 \
--override socket.receive.buffer.bytes=102400 \
--override socket.request.max.bytes=104857600 \
--override socket.send.buffer.bytes=102400 \
--override unclean.leader.election.enable=true \
--override zookeeper.session.timeout.ms=6000 \
--override zookeeper.set.acl=false \
--override broker.id.generation.enable=true \
--override connections.max.idle.ms=600000 \
--override controlled.shutdown.enable=true \
--override controlled.shutdown.max.retries=3 \
--override controlled.shutdown.retry.backoff.ms=5000 \
--override controller.socket.timeout.ms=30000 \
--override default.replication.factor=1 \
--override fetch.purgatory.purge.interval.requests=1000 \
--override group.max.session.timeout.ms=300000 \
--override group.min.session.timeout.ms=6000 \
--override inter.broker.protocol.version=2.2.0 \
--override log.cleaner.backoff.ms=15000 \
--override log.cleaner.dedupe.buffer.size=134217728 \
--override log.cleaner.delete.retention.ms=86400000 \
--override log.cleaner.enable=true \
--override log.cleaner.io.buffer.load.factor=0.9 \
--override log.cleaner.io.buffer.size=524288 \
--override log.cleaner.io.max.bytes.per.second=1.7976931348623157E308 \
--override log.cleaner.min.cleanable.ratio=0.5 \
--override log.cleaner.min.compaction.lag.ms=0 \
--override log.cleaner.threads=1 \
--override log.cleanup.policy=delete \
--override log.index.interval.bytes=4096 \
--override log.index.size.max.bytes=10485760 \
--override log.message.timestamp.difference.max.ms=9223372036854775807 \
--override log.message.timestamp.type=CreateTime \
--override log.preallocate=false \
--override log.retention.check.interval.ms=300000 \
--override max.connections.per.ip=2147483647 \
--override num.partitions=4 \
--override producer.purgatory.purge.interval.requests=1000 \
--override replica.fetch.backoff.ms=1000 \
--override replica.fetch.max.bytes=1048576 \
--override replica.fetch.response.max.bytes=10485760 \
--override reserved.broker.max.id=1000 "
env:
- name: KAFKA_HEAP_OPTS
value : "-Xmx512M -Xms512M"
- name: KAFKA_OPTS
value: "-Dlogging.level=INFO"
volumeMounts:
- name: kafka-pvc
mountPath: /var/lib/kafka
readinessProbe:
tcpSocket:
port: 9092
timeoutSeconds: 1
initialDelaySeconds: 5
securityContext:
runAsUser: 1000
fsGroup: 1000
volumeClaimTemplates:
- metadata:
name: kafka-pvc
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: kafka-sc
resources:
requests:
storage: 10Gi
EOF
[root@k8s231 kafka]# kubectl create -f kafka-statefulset.yaml
statefulset.apps/kafka-sts created
[root@k8s231 kafka]# kubectl -n elk get pod,pv,pvc|grep kafka|column -t
pod/kafka-sts-0 1/1 Running 0 44s
pod/kafka-sts-1 1/1 Running 0 33s
pod/kafka-sts-2 1/1 Running 0 21s
persistentvolume/kafka-pv-0 10Gi RWO Retain Bound elk/kafka-pvc-kafka-sts-2 kafka-sc 5m29s
persistentvolume/kafka-pv-1 10Gi RWO Retain Bound elk/kafka-pvc-kafka-sts-1 kafka-sc 5m29s
persistentvolume/kafka-pv-2 10Gi RWO Retain Bound elk/kafka-pvc-kafka-sts-0 kafka-sc 5m29s
persistentvolumeclaim/kafka-pvc-kafka-sts-0 Bound kafka-pv-2 10Gi RWO kafka-sc 44s
persistentvolumeclaim/kafka-pvc-kafka-sts-1 Bound kafka-pv-1 10Gi RWO kafka-sc 33s
persistentvolumeclaim/kafka-pvc-kafka-sts-2 Bound kafka-pv-0 10Gi RWO kafka-sc 21s
7.5. Verify that the Kafka cluster works
#Create a topic named test1
[root@k8s231 kafka]# kubectl -n elk exec kafka-sts-0 -- /opt/kafka/bin//kafka-topics.sh --create --topic test1 --zookeeper zk-svc-headless:2181 --partitions 3 --replication-factor 3
Created topic test1.
#Check that the topic was created
[root@k8s231 kafka]# kubectl -n elk exec kafka-sts-0 -- /opt/kafka/bin/kafka-topics.sh --list --zookeeper zk-svc-headless:2181
test1
#Write data into the topic
[root@k8s231 kafka]# kubectl -n elk exec -it kafka-sts-0 -- /opt/kafka/bin/kafka-console-producer.sh --topic test1 --broker-list kafka-sts-0.kafka-svc-headless.elk.svc.tom.com:9092
>1
>2
>3
>4545
>122
#Read data back from the topic
[root@k8s231 kafka]# kubectl -n elk exec -it kafka-sts-0 -- /opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server kafka-sts-0.kafka-svc-headless.elk.svc.tom.com:9092 --topic test1 --from-beginning
3
1
4545
2
122
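Partition leaders and replica placement for the topic can also be inspected (an extra check, not part of the original):
kubectl -n elk exec kafka-sts-0 -- /opt/kafka/bin/kafka-topics.sh --describe --topic test1 --zookeeper zk-svc-headless:2181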
8. Deploy Filebeat
Note: Filebeat runs as a DaemonSet and picks up container logs automatically
8.1. Create the Filebeat ConfigMap
[root@k8s231 filebeat]# cd /script/k8s/elk/filebeat
[root@k8s231 filebeat]# cat > filebeat-configmap.yaml <<'EOF'
apiVersion: v1
kind: ConfigMap
metadata:
name: filebeat-configmap
namespace: elk
labels:
k8s-app: filebeat
data:
filebeat.yml: |
filebeat.inputs:
- type: container
paths:
- '/var/lib/docker/containers/*/*.log'
processors:
- add_kubernetes_metadata:
host: ${NODE_NAME}
matchers:
- logs_path:
logs_path: "/var/lib/docker/containers/"
processors:
- add_cloud_metadata:
- add_host_metadata:
output:
kafka:
enabled: true
hosts: ["kafka-svc-headless:9092"]
#hosts: ["kafka-0.kafka.kube-elasticsearch.svc.cluster.local:9092","kafka-1.kafka.kube-elasticsearch.svc.cluster.local:9092","kafka-2.kafka.kube-elasticsearch.svc.cluster.local:9092"]
topic: "filebeat"
max_message_bytes: 5242880
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.hosts: ["http://es-svc-headless:9200"]
xpack.monitoring.elasticsearch.username: "elastic"
xpack.monitoring.elasticsearch.password: "Aa123456"
EOF
[root@k8s231 filebeat]# kubectl create -f filebeat-configmap.yaml
configmap/filebeat-configmap created
[root@k8s231 filebeat]# kubectl -n elk get cm|grep filebeat
filebeat-configmap 1 34s
8.2. Create the service account Filebeat needs and grant it permissions
[root@k8s231 filebeat]# cat > filebeat-rbac.yaml <<'EOF'
apiVersion: v1
kind: ServiceAccount
metadata:
name: filebeat-sa
namespace: elk
labels:
k8s-app: filebeat
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: filebeat-clusterrole-1
labels:
k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
resources:
- namespaces
- pods
- nodes
verbs:
- get
- watch
- list
- apiGroups: ["apps"]
resources:
- replicasets
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: filebeat-role-1
namespace: elk
labels:
k8s-app: filebeat
rules:
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs: ["get", "create", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: filebeat-role-2
namespace: elk
labels:
k8s-app: filebeat
rules:
- apiGroups: [""]
resources:
- configmaps
resourceNames:
- kubeadm-config
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: filebeat-crb
subjects:
- kind: ServiceAccount
name: filebeat-sa
namespace: elk
roleRef:
kind: ClusterRole
name: filebeat-clusterrole-1
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: filebeat
namespace: elk
subjects:
- kind: ServiceAccount
name: filebeat-sa
namespace: elk
roleRef:
kind: Role
name: filebeat-role-1
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: filebeat-rolebind-kubeadm-config
namespace: elk
subjects:
- kind: ServiceAccount
name: filebeat-sa
namespace: elk
roleRef:
kind: Role
name: filebeat-role-2
apiGroup: rbac.authorization.k8s.io
EOF
[root@k8s231 filebeat]# kubectl create -f filebeat-rbac.yaml
serviceaccount/filebeat-sa created
clusterrole.rbac.authorization.k8s.io/filebeat-clusterrole-1 created
role.rbac.authorization.k8s.io/filebeat-role-1 created
role.rbac.authorization.k8s.io/filebeat-role-2 created
clusterrolebinding.rbac.authorization.k8s.io/filebeat-crb created
rolebinding.rbac.authorization.k8s.io/filebeat created
rolebinding.rbac.authorization.k8s.io/filebeat-rolebind-kubeadm-config created
[root@k8s231 filebeat]# kubectl -n elk get sa,role,rolebinding,clusterrolebinding -o wide|grep filebeat |column -t
serviceaccount/filebeat-sa 1 6m9s
role.rbac.authorization.k8s.io/filebeat-role-1 2024-08-27T08:19:48Z
role.rbac.authorization.k8s.io/filebeat-role-2 2024-08-27T08:19:48Z
rolebinding.rbac.authorization.k8s.io/filebeat Role/filebeat-role-1 6m9s elk/filebeat-sa
rolebinding.rbac.authorization.k8s.io/filebeat-rolebind-kubeadm-config Role/filebeat-role-2 6m9s elk/filebeat-sa
clusterrolebinding.rbac.authorization.k8s.io/filebeat-crb ClusterRole/filebeat-clusterrole-1 6m9s elk/filebeat-sa
8.3. Create the Filebeat DaemonSet
[root@k8s231 filebeat]# cat > filebeat-daemonset.yaml <<'EOF'
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: filebeat-ds
namespace: elk
labels:
k8s-app: filebeat
spec:
selector:
matchLabels:
k8s-app: filebeat
template:
metadata:
labels:
k8s-app: filebeat
spec:
serviceAccountName: filebeat-sa
terminationGracePeriodSeconds: 30
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: filebeat
image: harbor.tom.com/project/filebeat:7.17.5
#image: docker.elastic.co/beats/filebeat:7.17.6
args: [
"-c", "/etc/filebeat.yml",
"-e",
]
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
securityContext:
runAsUser: 0
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 100Mi
volumeMounts:
- name: config
mountPath: /etc/filebeat.yml
readOnly: true
subPath: filebeat.yml
- name: data
mountPath: /usr/share/filebeat/data
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
volumes:
- name: config
configMap:
defaultMode: 0640
name: filebeat-configmap
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: data
hostPath:
path: /var/lib/filebeat/data
type: DirectoryOrCreate
EOF
[root@k8s231 filebeat]# kubectl create -f filebeat-daemonset.yaml
daemonset.apps/filebeat-ds created
[root@k8s231 filebeat]# kubectl -n elk get pod|grep filebeat
filebeat-ds-7h8l5 1/1 Running 0 47s
filebeat-ds-d9d8g 1/1 Running 0 47s
filebeat-ds-ktw9w 1/1 Running 0 47s
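To confirm Filebeat is actually publishing to Kafka (an extra check), consume a few messages from the filebeat topic:
kubectl -n elk exec -it kafka-sts-0 -- /opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server kafka-sts-0.kafka-svc-headless.elk.svc.tom.com:9092 --topic filebeat --from-beginning --max-messages 5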
9. Deploy Logstash
Logstash consumes the data from Kafka, processes it, and writes it to the ES cluster
9.1. Create the Logstash ConfigMap
[root@k8s231 logstash]# cd /script/k8s/elk/logstash
[root@k8s231 logstash]# cat > logstash-configmap.yaml <<'EOF'
apiVersion: v1
kind: ConfigMap
metadata:
name: logstash-configmap
namespace: elk
data:
logstash.yml: |
http.host: "0.0.0.0"
path.config: /usr/share/logstash/pipeline
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.hosts: ["http://es-svc-headless:9200"]
xpack.monitoring.elasticsearch.username: "logstash_system"
xpack.monitoring.elasticsearch.password: "Aa123456"
logstash.conf: |
input {
kafka {
bootstrap_servers => "kafka-svc-headless:9092"
#bootstrap_servers => "kafka-0.kafka.kube-elasticsearch.svc.cluster.local:9092,kafka-1.kafka.kube-elasticsearch.svc.cluster.local:9092,kafka-2.kafka.kube-elasticsearch.svc.cluster.local:9092"
topics => ["filebeat"]
codec => "json"
}
}
filter {
date {
match => [ "timestamp" , "dd/MMM/yyyy:HH:mm:ss Z" ]
}
}
output {
elasticsearch {
hosts => ["es-svc-headless:9200"]
user => "elastic"
password => "Aa123456"
index => "kubernetes-%{+YYYY.MM.dd}"
}
}
EOF
[root@k8s231 logstash]# kubectl create -f logstash-configmap.yaml
configmap/logstash-configmap created
[root@k8s231 logstash]# kubectl -n elk get cm|grep logstash
logstash-configmap 2 3m10s
9.2. Create the Logstash Deployment
[root@k8s231 logstash]# cat > logstash-deployment.yaml <<'EOF'
apiVersion: apps/v1
kind: Deployment
metadata:
name: logstash-deployment
namespace: elk
spec:
selector:
matchLabels:
app: logstash
replicas: 1
template:
metadata:
labels:
app: logstash
spec:
containers:
- name: logstash
image: harbor.tom.com/project/logstash:7.17.5
#image: docker.elastic.co/logstash/logstash:7.17.6
ports:
- containerPort: 5044
volumeMounts:
- name: config-volume
mountPath: /usr/share/logstash/config
- name: logstash-pipeline-volume
mountPath: /usr/share/logstash/pipeline
- name: localtime
mountPath: /etc/localtime
volumes:
- name: config-volume
configMap:
name: logstash-configmap
items:
- key: logstash.yml
path: logstash.yml
- name: logstash-pipeline-volume
configMap:
name: logstash-configmap
items:
- key: logstash.conf
path: logstash.conf
#Use the host's time zone
- name: localtime
hostPath:
path: /etc/localtime
EOF
[root@k8s231 logstash]# kubectl create -f logstash-deployment.yaml
deployment.apps/logstash-deployment created
[root@k8s231 logstash]# kubectl -n elk get pod|grep log
logstash-deployment-6f665f8885-q5w46 1/1 Running 0 5m5s
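Once Logstash has consumed some events, the daily index should appear in Elasticsearch (an extra check, assuming curl is available inside the ES image):
kubectl -n elk exec es-sts-0 -- curl -s -u elastic:Aa123456 'http://localhost:9200/_cat/indices/kubernetes-*?v'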
10. Deploy EFAK (Kafka Eagle), a Kafka web management tool
[root@k8s231 efak] cd /script/k8s/elk/efak
[root@k8s231 efak] mkdir configmap
10.1. Edit system-config.properties, the configuration file EFAK uses
#Change the values of cluster1.zk.list and kafka.eagle.url=jdbc:mysql to match your own environment
[root@k8s231 efak]# cat > configmap/system-config.properties <<'EOF'
######################################
# multi zookeeper&kafka cluster list
######################################
kafka.eagle.zk.cluster.alias=cluster1
cluster1.zk.list=zk-sts-0.zk-svc-headless.elk.svc.tom.com:2181,zk-sts-1.zk-svc-headless.elk.svc.tom.com:2181,zk-sts-2.zk-svc-headless.elk.svc.tom.com:2181
#cluster1.zk.list=zk-svc-headless:2181
######################################
# zk client thread limit
######################################
kafka.zk.limit.size=25
######################################
# kafka eagle webui port
######################################
kafka.eagle.webui.port=8048
######################################
# kafka offset storage
######################################
cluster1.kafka.eagle.offset.storage=kafka
######################################
# enable kafka metrics
######################################
kafka.eagle.metrics.charts=true
kafka.eagle.sql.fix.error=true
######################################
# kafka sql topic records max
######################################
kafka.eagle.sql.topic.records.max=5000
######################################
# alarm email configure
######################################
kafka.eagle.mail.enable=false
kafka.eagle.mail.sa=alert_sa@163.com
kafka.eagle.mail.username=alert_sa@163.com
kafka.eagle.mail.password=mqslimczkdqabbbh
kafka.eagle.mail.server.host=smtp.163.com
kafka.eagle.mail.server.port=25
######################################
# alarm im configure
######################################
#kafka.eagle.im.dingding.enable=true
#kafka.eagle.im.dingding.url=https://oapi.dingtalk.com/robot/send?access_token=
#kafka.eagle.im.wechat.enable=true
#kafka.eagle.im.wechat.token=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=xxx&corpsecret=xxx
#kafka.eagle.im.wechat.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=
#kafka.eagle.im.wechat.touser=
#kafka.eagle.im.wechat.toparty=
#kafka.eagle.im.wechat.totag=
#kafka.eagle.im.wechat.agentid=
######################################
# delete kafka topic token
######################################
kafka.eagle.topic.token=keadmin
######################################
# kafka sasl authenticate
######################################
cluster1.kafka.eagle.sasl.enable=false
cluster1.kafka.eagle.sasl.protocol=SASL_PLAINTEXT
cluster1.kafka.eagle.sasl.mechanism=PLAIN
cluster1.kafka.eagle.sasl.jaas.config=kafka_client_jaas.conf
######################################
# kafka jdbc driver address
######################################
#kafka.eagle.driver=org.sqlite.JDBC
#kafka.eagle.url=jdbc:sqlite:/usr/local/ke.db
#kafka.eagle.username=root
#kafka.eagle.password=www.kafka-eagle.org
kafka.eagle.driver=com.mysql.jdbc.Driver
kafka.eagle.url=jdbc:mysql://efak-mysql:3306/efak
kafka.eagle.username=root
kafka.eagle.password=mysql
EOF
10.2. Edit the authentication file
[root@k8s231 efak]# cat > configmap/kafka_client_jaas.conf <<'EOF'
KafkaClient {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="admin"
password="admin-secret";
};
EOF
10.3. Write the MySQL manifests
[root@k8s231 efak]# cat > mysql.yml <<'EOF'
apiVersion: v1
kind: PersistentVolume
metadata:
name: mysql-pv
namespace: elk
spec:
capacity:
storage: 30Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
hostPath:
path: /data/mysql
type: DirectoryOrCreate
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- k8s232.tom.com
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mysql-pv-claim
namespace: elk
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 30Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: mysql
namespace: elk
spec:
replicas: 1
selector:
matchLabels:
app: mysql
template:
metadata:
labels:
app: mysql
spec:
containers:
- name: mysql
image: harbor.tom.com/project/mysql:5.6
imagePullPolicy: IfNotPresent
args:
- "--ignore-db-dir=lost+found"
ports:
- containerPort: 3306
env:
- name: MYSQL_DATABASE
value: "efak"
- name: MYSQL_ROOT_PASSWORD
value: "mysql"
volumeMounts:
- name: mysql
mountPath: /var/lib/mysql/efak
volumes:
- name: mysql
persistentVolumeClaim:
claimName: mysql-pv-claim
---
apiVersion: v1
kind: Service
metadata:
name: efak-mysql
namespace: elk
spec:
#type: ClusterIP
clusterIP: None
selector:
app: mysql
ports:
- port: 3306
name: mysql
---
apiVersion: v1
kind: Service
metadata:
name: mysql-client
namespace: elk
spec:
type: NodePort
ports:
- port: 3306
targetPort: 3306
nodePort: 30006
selector:
app: mysql
EOF
10.4. Write the Kafka Eagle manifests
[root@k8s231 efak]# cat > kafka-eagle.yml <<EOF
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kafka-eagle
namespace: elk
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
workload.user.cattle.io/workloadselector: deployment-kafka-kafka-eagle
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
type: RollingUpdate
template:
metadata:
labels:
workload.user.cattle.io/workloadselector: deployment-kafka-kafka-eagle
spec:
containers:
- image: buzhiyun/kafka-eagle:latest
imagePullPolicy: Always
name: kafka-eagle
ports:
- containerPort: 8048
name: 8048tcp01
protocol: TCP
resources: {}
securityContext:
allowPrivilegeEscalation: false
privileged: false
procMount: Default
readOnlyRootFilesystem: false
runAsNonRoot: false
stdin: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
tty: true
volumeMounts:
- mountPath: /opt/kafka-eagle/conf
name: conf
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
volumes:
- configMap:
defaultMode: 256
name: kafka-eagle-config
optional: false
name: conf
---
apiVersion: v1
kind: Service
metadata:
name: kafka-eagle-client
namespace: elk
spec:
type: NodePort
ports:
- port: 8048
targetPort: 8048
nodePort: 30048
selector:
workload.user.cattle.io/workloadselector: deployment-kafka-kafka-eagle
EOF
10.5. View the file tree
[root@k8s231 efak]# tree
.
├── configmap
│ ├── kafka_client_jaas.conf
│ └── system-config.properties
├── kafka-eagle.yml
└── mysql.yml
10.6. Generate a ConfigMap from the files in the configmap directory
Kafka Eagle loads these 2 files inside the container
[root@k8s231 efak]# kubectl create configmap kafka-eagle-config -n elk --from-file=configmap/
10.7. Create MySQL and Kafka Eagle
[root@k8s231 efak]# kubectl create -f mysql.yml -f kafka-eagle.yml
[root@k8s231 efak]# kubectl -n elk get pod,svc,pv,pvc,cm -o wide|grep mysql|column -t
pod/mysql-bb8c68b4c-wfhrs 1/1 Running 0 23m 10.200.1.84 k8s232.tom.com <none> <none>
service/efak-mysql ClusterIP None <none> 3306/TCP 23m app=mysql
service/mysql-client NodePort 10.100.215.135 <none> 3306:30006/TCP 23m app=mysql
persistentvolume/mysql-pv 30Gi RWO Retain Bound elk/mysql-pv-claim 23m Filesystem
persistentvolumeclaim/mysql-pv-claim Bound mysql-pv 30Gi RWO 23m Filesystem
[root@k8s231 efak]# kubectl -n elk get pod,svc,pv,pvc,cm -o wide|grep kafka-eagle|column -t
pod/kafka-eagle-5fff8fc4d6-x6bxt 1/1 Running 0 23m 10.200.2.91 k8s234.tom.com <none> <none>
service/kafka-eagle-client NodePort 10.100.199.206 <none> 8048:30048/TCP 23m workload.user.cattle.io/workloadselector=deployment-kafka-kafka-eagle
configmap/kafka-eagle-config 2 27m
10.8. Login test
Open in a browser: http://10.0.0.234:30048/ke
Account: admin  Password: 123456