Appendix 014: Kubernetes Prometheus + Grafana + EFK + Kibana + GlusterFS Integrated Solution


木二   2020-03-20

1 GlusterFS storage cluster deployment

Note: the steps below are abbreviated; for details see 《附009.Kubernetes永久存储之GlusterFS独立部署》.

1.1 Architecture overview

1.2 Planning

Host          IP            Disk    Notes
k8smaster01   172.24.8.71   ——      Kubernetes master node, Heketi host
k8smaster02   172.24.8.72   ——      Kubernetes master node, Heketi host
k8smaster03   172.24.8.73   ——      Kubernetes master node, Heketi host
k8snode01     172.24.8.74   sdb     Kubernetes worker node, GlusterFS node 01
k8snode02     172.24.8.75   sdb     Kubernetes worker node, GlusterFS node 02
k8snode03     172.24.8.76   sdb     Kubernetes worker node, GlusterFS node 03

Tip: this plan uses the bare (raw) disks directly.

1.3 Install GlusterFS

# yum -y install centos-release-gluster
# yum -y install glusterfs-server
# systemctl start glusterd
# systemctl enable glusterd
Tip: it is recommended to install these packages on all nodes.

1.4 Add the trusted storage pool

[root@k8snode01 ~]# gluster peer probe k8snode02
[root@k8snode01 ~]# gluster peer probe k8snode03
[root@k8snode01 ~]# gluster peer status #check the trusted pool status
[root@k8snode01 ~]# gluster pool list #list the trusted pool members
Tip: this only needs to be run once, on any one GlusterFS node.

1.5 Install Heketi

[root@k8smaster01 ~]# yum -y install heketi heketi-client

1.6 Configure Heketi

[root@k8smaster01 ~]# vi /etc/heketi/heketi.json
{
  "_port_comment": "Heketi Server Port Number",
  "port": "8080",

  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth": true,

  "_jwt": "Private keys for access",
  "jwt": {
    "_admin": "Admin has access to all APIs",
    "admin": {
      "key": "admin123"
    },
    "_user": "User only has access to /volumes endpoint",
    "user": {
      "key": "xianghy"
    }
  },

  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs": {
    "_executor_comment": [
      "Execute plugin. Possible choices: mock, ssh",
      "mock: This setting is used for testing and development.",
      "      It will not send commands to any node.",
      "ssh:  This setting will notify Heketi to ssh to the nodes.",
      "      It will need the values in sshexec to be configured.",
      "kubernetes: Communicate with GlusterFS containers over",
      "            Kubernetes exec api."
    ],
    "executor": "ssh",

    "_sshexec_comment": "SSH username and private key file information",
    "sshexec": {
      "keyfile": "/etc/heketi/heketi_key",
      "user": "root",
      "port": "22",
      "fstab": "/etc/fstab"
    },

    "_db_comment": "Database file name",
    "db": "/var/lib/heketi/heketi.db",

    "_loglevel_comment": [
      "Set log level. Choices are:",
      "  none, critical, error, warning, info, debug",
      "Default is warning"
    ],
    "loglevel" : "warning"
  }
}
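Before moving on, it can be worth confirming that the edited file is still valid JSON; a minimal sketch, assuming python is available on the host:

[root@k8smaster01 ~]# python -m json.tool /etc/heketi/heketi.json >/dev/null && echo "heketi.json OK"    #prints an error and a non-zero exit code if the JSON is malformed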
 

1.7 Configure passwordless SSH

[root@k8smaster01 ~]# ssh-keygen -t rsa -q -f /etc/heketi/heketi_key -N ""
[root@k8smaster01 ~]# chown heketi:heketi /etc/heketi/heketi_key
[root@k8smaster01 ~]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@k8snode01
[root@k8smaster01 ~]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@k8snode02
[root@k8smaster01 ~]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@k8snode03

1.8 Start Heketi

[root@k8smaster01 ~]# systemctl enable heketi.service
[root@k8smaster01 ~]# systemctl start heketi.service
[root@k8smaster01 ~]# systemctl status heketi.service
[root@k8smaster01 ~]# curl http://localhost:8080/hello #test access
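If the curl test fails, two quick checks help narrow it down (a sketch; standard systemd and iproute2 tooling assumed):

[root@k8smaster01 ~]# ss -tnlp | grep 8080                              #confirm heketi is listening on port 8080
[root@k8smaster01 ~]# journalctl -u heketi.service -n 20 --no-pager     #inspect the most recent service log lines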

1.9 Configure the Heketi topology

[root@k8smaster01 ~]# vi /etc/heketi/topology.json
1 { 2 "clusters": [ 3 { 4 "nodes": [ 5 { 6 "node": { 7 "hostnames": { 8 "manage": [ 9 "k8snode01" 10 ], 11 "storage": [ 12 "172.24.8.74" 13 ] 14 }, 15 "zone": 1 16 }, 17 "devices": [ 18 "https://img.qb5200.com/download-x/dev/sdb" 19 ] 20 }, 21 { 22 "node": { 23 "hostnames": { 24 "manage": [ 25 "k8snode02" 26 ], 27 "storage": [ 28 "172.24.8.75" 29 ] 30 }, 31 "zone": 1 32 }, 33 "devices": [ 34 "https://img.qb5200.com/download-x/dev/sdb" 35 ] 36 }, 37 { 38 "node": { 39 "hostnames": { 40 "manage": [ 41 "k8snode03" 42 ], 43 "storage": [ 44 "172.24.8.76" 45 ] 46 }, 47 "zone": 1 48 }, 49 "devices": [ 50 "https://img.qb5200.com/download-x/dev/sdb" 51 ] 52 } 53 ] 54 } 55 ] 56 }
 
[root@k8smaster01 ~]# echo "export HEKETI_CLI_SERVER=http://k8smaster01:8080" >> /etc/profile.d/heketi.sh
[root@k8smaster01 ~]# echo "alias heketi-cli='heketi-cli --user admin --secret admin123'" >> .bashrc
[root@k8smaster01 ~]# source /etc/profile.d/heketi.sh
[root@k8smaster01 ~]# source .bashrc
[root@k8smaster01 ~]# echo $HEKETI_CLI_SERVER
http://k8smaster01:8080
[root@k8smaster01 ~]# heketi-cli --server $HEKETI_CLI_SERVER --user admin --secret admin123 topology load --json=/etc/heketi/topology.json

1.10 Cluster management and testing

[root@k8smaster01 ~]# heketi-cli cluster list #list clusters
[root@k8smaster01 ~]# heketi-cli node list #list nodes
[root@k8smaster01 ~]# heketi-cli volume list #list volumes
[root@k8snode01 ~]# gluster volume info #verify from a GlusterFS node
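As an end-to-end smoke test, a small replicated volume can be created through Heketi and then removed; a sketch (the 2 GiB size is arbitrary, and <VOLUME_ID> is a placeholder for the id reported by volume list):

[root@k8smaster01 ~]# heketi-cli volume create --size=2 --replica=3    #create a 2GiB replica-3 test volume
[root@k8smaster01 ~]# heketi-cli volume list                           #note the new volume id
[root@k8smaster01 ~]# heketi-cli volume delete <VOLUME_ID>             #clean up the test volume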

1.11 Create a StorageClass

[root@k8smaster01 study]# vi heketi-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: heketi-secret
  namespace: heketi
data:
  key: YWRtaW4xMjM=
type: kubernetes.io/glusterfs
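The key field is simply the admin key from heketi.json ("admin123") base64-encoded; it can be regenerated like this:

[root@k8smaster01 study]# echo -n "admin123" | base64    #prints YWRtaW4xMjM=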
 
[root@k8smaster01 study]# kubectl create ns heketi
[root@k8smaster01 study]# kubectl create -f heketi-secret.yaml #create the Heketi secret
[root@k8smaster01 study]# kubectl get secrets -n heketi
[root@k8smaster01 study]# vim gluster-heketi-storageclass.yaml #create the StorageClass
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ghstorageclass
parameters:
  resturl: "http://172.24.8.71:8080"
  clusterid: "ad0f81f75f01d01ebd6a21834a2caa30"
  restauthenabled: "true"
  restuser: "admin"
  secretName: "heketi-secret"
  secretNamespace: "heketi"
  volumetype: "replicate:3"
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Delete
 
[root@k8smaster01 study]# kubectl create -f gluster-heketi-storageclass.yaml
Note: a StorageClass is immutable once created; to change it, delete and recreate it.
[root@k8smaster01 heketi]# kubectl get storageclasses #verify
NAME             PROVISIONER               AGE
ghstorageclass   kubernetes.io/glusterfs   85s
[root@k8smaster01 heketi]# kubectl describe storageclasses ghstorageclass
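To confirm that dynamic provisioning actually works, a throwaway PVC bound to the new StorageClass can be created and then deleted; a minimal sketch (the name gluster-test-pvc is arbitrary):

[root@k8smaster01 heketi]# cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: gluster-test-pvc
spec:
  storageClassName: ghstorageclass
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
EOF
[root@k8smaster01 heketi]# kubectl get pvc gluster-test-pvc       #should reach the Bound state shortly
[root@k8smaster01 heketi]# kubectl delete pvc gluster-test-pvc    #clean up the test claim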
 

2 Cluster monitoring: Metrics

Note: the steps below are abbreviated; for details see 《049.集群管理-集群监控Metrics》.

2.1 Enable the aggregation layer

The aggregation layer must be enabled for the Metrics API; clusters deployed with kubeadm have it enabled by default, which can be verified as follows.
[root@k8smaster01 ~]# cat /etc/kubernetes/manifests/kube-apiserver.yaml

2.2 Get the deployment files

[root@k8smaster01 ~]# git clone https://github.com/kubernetes-incubator/metrics-server.git
[root@k8smaster01 ~]# cd metrics-server/deploy/1.8+/
[root@k8smaster01 1.8+]# vi metrics-server-deployment.yaml
……
        image: mirrorgooglecontainers/metrics-server-amd64:v0.3.6    #switch to a China-accessible mirror
        command:
        - /metrics-server
        - --metric-resolution=30s
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP    #add the command arguments above
……
 

2.3 Deploy

[root@k8smaster01 1.8+]# kubectl apply -f .
[root@k8smaster01 1.8+]# kubectl -n kube-system get pods -l k8s-app=metrics-server
[root@k8smaster01 1.8+]# kubectl -n kube-system logs -l k8s-app=metrics-server -f #watch the deployment logs

2.4 Verify

[root@k8smaster01 ~]# kubectl top nodes
[root@k8smaster01 ~]# kubectl top pods --all-namespaces
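The same data can also be pulled straight from the Metrics API, which is useful when kubectl top misbehaves; a quick check:

[root@k8smaster01 ~]# kubectl get apiservices v1beta1.metrics.k8s.io                          #AVAILABLE should be True
[root@k8smaster01 ~]# kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes" | head -c 300    #raw node metrics JSON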
 

3 Prometheus deployment

Note: the steps below are abbreviated; for details see 《050.集群管理-Prometheus+Grafana监控方案》.

3.1 Get the deployment files

[root@k8smaster01 ~]# git clone https://github.com/prometheus/prometheus

3.2 Create the namespace

[root@k8smaster01 ~]# cd prometheus/documentation/examples/
[root@k8smaster01 examples]# vi monitor-namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: monitoring
[root@k8smaster01 examples]# kubectl create -f monitor-namespace.yaml

3.3 Create RBAC

[root@k8smaster01 examples]# vi rbac-setup.yml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: prometheus
rules:
- apiGroups: [""]
  resources:
  - nodes
  - nodes/proxy
  - services
  - endpoints
  - pods
  verbs: ["get", "list", "watch"]
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus
  namespace: monitoring            #only the namespace needs changing
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus
subjects:
- kind: ServiceAccount
  name: prometheus
  namespace: monitoring            #only the namespace needs changing
[root@k8smaster01 examples]# kubectl create -f rbac-setup.yml

3.4 Create the Prometheus ConfigMap

[root@k8smaster01 examples]# cat prometheus-kubernetes.yml | grep -v ^$ | grep -v "#" >> prometheus-config.yaml
[root@k8smaster01 examples]# vi prometheus-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-server-conf
  labels:
    name: prometheus-server-conf
  namespace: monitoring            #change the namespace
data:
  prometheus.yml: |-
    global:
      scrape_interval: 10s
      evaluation_interval: 10s

    scrape_configs:
      - job_name: 'kubernetes-apiservers'
        kubernetes_sd_configs:
        - role: endpoints
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        relabel_configs:
        - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
          action: keep
          regex: default;kubernetes;https

      - job_name: 'kubernetes-nodes'
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        kubernetes_sd_configs:
        - role: node
        relabel_configs:
        - action: labelmap
          regex: __meta_kubernetes_node_label_(.+)
        - target_label: __address__
          replacement: kubernetes.default.svc:443
        - source_labels: [__meta_kubernetes_node_name]
          regex: (.+)
          target_label: __metrics_path__
          replacement: /api/v1/nodes/${1}/proxy/metrics

      - job_name: 'kubernetes-cadvisor'
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        kubernetes_sd_configs:
        - role: node
        relabel_configs:
        - action: labelmap
          regex: __meta_kubernetes_node_label_(.+)
        - target_label: __address__
          replacement: kubernetes.default.svc:443
        - source_labels: [__meta_kubernetes_node_name]
          regex: (.+)
          target_label: __metrics_path__
          replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor

      - job_name: 'kubernetes-service-endpoints'
        kubernetes_sd_configs:
        - role: endpoints
        relabel_configs:
        - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
          action: keep
          regex: true
        - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
          action: replace
          target_label: __scheme__
          regex: (https?)
        - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
          action: replace
          target_label: __metrics_path__
          regex: (.+)
        - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
          action: replace
          target_label: __address__
          regex: ([^:]+)(?::\d+)?;(\d+)
          replacement: $1:$2
        - action: labelmap
          regex: __meta_kubernetes_service_label_(.+)
        - source_labels: [__meta_kubernetes_namespace]
          action: replace
          target_label: kubernetes_namespace
        - source_labels: [__meta_kubernetes_service_name]
          action: replace
          target_label: kubernetes_name

      - job_name: 'kubernetes-services'
        metrics_path: /probe
        params:
          module: [http_2xx]
        kubernetes_sd_configs:
        - role: service
        relabel_configs:
        - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
          action: keep
          regex: true
        - source_labels: [__address__]
          target_label: __param_target
        - target_label: __address__
          replacement: blackbox-exporter.example.com:9115
        - source_labels: [__param_target]
          target_label: instance
        - action: labelmap
          regex: __meta_kubernetes_service_label_(.+)
        - source_labels: [__meta_kubernetes_namespace]
          target_label: kubernetes_namespace
        - source_labels: [__meta_kubernetes_service_name]
          target_label: kubernetes_name

      - job_name: 'kubernetes-ingresses'
        kubernetes_sd_configs:
        - role: ingress
        relabel_configs:
        - source_labels: [__meta_kubernetes_ingress_annotation_prometheus_io_probe]
          action: keep
          regex: true
        - source_labels: [__meta_kubernetes_ingress_scheme,__address__,__meta_kubernetes_ingress_path]
          regex: (.+);(.+);(.+)
          replacement: ${1}://${2}${3}
          target_label: __param_target
        - target_label: __address__
          replacement: blackbox-exporter.example.com:9115
        - source_labels: [__param_target]
          target_label: instance
        - action: labelmap
          regex: __meta_kubernetes_ingress_label_(.+)
        - source_labels: [__meta_kubernetes_namespace]
          target_label: kubernetes_namespace
        - source_labels: [__meta_kubernetes_ingress_name]
          target_label: kubernetes_name

      - job_name: 'kubernetes-pods'
        kubernetes_sd_configs:
        - role: pod
        relabel_configs:
        - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
          action: keep
          regex: true
        - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
          action: replace
          target_label: __metrics_path__
          regex: (.+)
        - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
          action: replace
          regex: ([^:]+)(?::\d+)?;(\d+)
          replacement: $1:$2
          target_label: __address__
        - action: labelmap
          regex: __meta_kubernetes_pod_label_(.+)
        - source_labels: [__meta_kubernetes_namespace]
          action: replace
          target_label: kubernetes_namespace
        - source_labels: [__meta_kubernetes_pod_name]
          action: replace
          target_label: kubernetes_pod_name
[root@k8smaster01 examples]# kubectl create -f prometheus-config.yaml

3.5 Create a persistent PVC

[root@k8smaster01 examples]# vi prometheus-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: prometheus-pvc
  namespace: monitoring
  annotations:
    volume.beta.kubernetes.io/storage-class: ghstorageclass
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
[root@k8smaster01 examples]# kubectl create -f prometheus-pvc.yaml
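Before moving on, confirm the claim was dynamically provisioned and bound by ghstorageclass:

[root@k8smaster01 examples]# kubectl -n monitoring get pvc prometheus-pvc                      #STATUS should be Bound
[root@k8smaster01 examples]# kubectl -n monitoring describe pvc prometheus-pvc | tail -n 5     #events show the provisioner activity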

3.6 Deploy Prometheus

[root@k8smaster01 examples]# vi prometheus-deployment.yml
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  labels:
    name: prometheus-deployment
  name: prometheus-server
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus-server
  template:
    metadata:
      labels:
        app: prometheus-server
    spec:
      containers:
        - name: prometheus-server
          image: prom/prometheus:v2.14.0
          command:
          - "/bin/prometheus"
          args:
          - "--config.file=/etc/prometheus/prometheus.yml"
          - "--storage.tsdb.path=/prometheus/"
          - "--storage.tsdb.retention=72h"
          ports:
          - containerPort: 9090
            protocol: TCP
          volumeMounts:
          - name: prometheus-config-volume
            mountPath: /etc/prometheus/
          - name: prometheus-storage-volume
            mountPath: /prometheus/
      serviceAccountName: prometheus
      imagePullSecrets:
        - name: regsecret
      volumes:
        - name: prometheus-config-volume
          configMap:
            defaultMode: 420
            name: prometheus-server-conf
        - name: prometheus-storage-volume
          persistentVolumeClaim:
            claimName: prometheus-pvc
[root@k8smaster01 examples]# kubectl create -f prometheus-deployment.yml

3.7 Create the Prometheus Service

[root@k8smaster01 examples]# vi prometheus-service.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: prometheus-service
  name: prometheus-service
  namespace: monitoring
spec:
  type: NodePort
  selector:
    app: prometheus-server
  ports:
  - port: 9090
    targetPort: 9090
    nodePort: 30001
[root@k8smaster01 examples]# kubectl create -f prometheus-service.yaml
[root@k8smaster01 examples]# kubectl get all -n monitoring

3.8 Verify Prometheus

Access directly in a browser: http://172.24.8.100:30001/
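Besides the web UI, target health can be checked from the command line through the Prometheus HTTP API; a sketch, assuming jq is installed and using the same NodePort address as above:

[root@k8smaster01 examples]# curl -s http://172.24.8.100:30001/api/v1/targets | jq '.data.activeTargets[] | {job: .labels.job, health: .health}'    #each scrape job should report health "up"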
 

4 Deploy Grafana

Note: the steps below are abbreviated; for details see 《050.集群管理-Prometheus+Grafana监控方案》.

4.1 Get the deployment files

[root@k8smaster01 ~]# git clone https://github.com/liukuan73/kubernetes-addons
[root@k8smaster01 ~]# cd /root/kubernetes-addons/monitor/prometheus+grafana

4.2 Create a persistent PVC

[root@k8smaster01 prometheus+grafana]# vi grafana-data-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: grafana-data-pvc
  namespace: monitoring
  annotations:
    volume.beta.kubernetes.io/storage-class: ghstorageclass
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
[root@k8smaster01 prometheus+grafana]# kubectl create -f grafana-data-pvc.yaml

4.3 Deploy Grafana

[root@k8smaster01 prometheus+grafana]# vi grafana.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: monitoring-grafana
  namespace: monitoring
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: grafana
    spec:
      containers:
      - name: grafana
        image: grafana/grafana:6.5.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 3000
          protocol: TCP
        volumeMounts:
        - mountPath: /var/lib/grafana
          name: grafana-storage
        env:
        - name: INFLUXDB_HOST
          value: monitoring-influxdb
        - name: GF_SERVER_HTTP_PORT
          value: "3000"
        - name: GF_AUTH_BASIC_ENABLED
          value: "false"
        - name: GF_AUTH_ANONYMOUS_ENABLED
          value: "true"
        - name: GF_AUTH_ANONYMOUS_ORG_ROLE
          value: Admin
        - name: GF_SERVER_ROOT_URL
          value: /
        readinessProbe:
          httpGet:
            path: /login
            port: 3000
      volumes:
      - name: grafana-storage
        persistentVolumeClaim:
          claimName: grafana-data-pvc
      nodeSelector:
        node-role.kubernetes.io/master: "true"
      tolerations:
      - key: "node-role.kubernetes.io/master"
        effect: "NoSchedule"
---
apiVersion: v1
kind: Service
metadata:
  labels:
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: monitoring-grafana
  annotations:
    prometheus.io/scrape: 'true'
    prometheus.io/tcp-probe: 'true'
    prometheus.io/tcp-probe-port: '80'
  name: monitoring-grafana
  namespace: monitoring
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 3000
    nodePort: 30002
  selector:
    k8s-app: grafana
[root@k8smaster01 prometheus+grafana]# kubectl label nodes k8smaster01 node-role.kubernetes.io/master=true
[root@k8smaster01 prometheus+grafana]# kubectl label nodes k8smaster02 node-role.kubernetes.io/master=true
[root@k8smaster01 prometheus+grafana]# kubectl label nodes k8smaster03 node-role.kubernetes.io/master=true
[root@k8smaster01 prometheus+grafana]# kubectl create -f grafana.yaml
[root@k8smaster01 prometheus+grafana]# kubectl get all -n monitoring

4.4 Verify Grafana

Access directly in a browser: http://172.24.8.100:30002/

4.5 Configure Grafana

  • Add a data source: omitted
  • Create users: omitted
Tip: for all Grafana configuration options, see https://grafana.com/docs/grafana/latest/installation/configuration/.
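Since anonymous Admin access is enabled in the deployment above, the Prometheus data source can also be added non-interactively through the Grafana HTTP API; a sketch using the in-cluster Service DNS name for Prometheus (adjust the URL if you prefer the NodePort):

[root@k8smaster01 prometheus+grafana]# curl -s -X POST http://172.24.8.100:30002/api/datasources \
  -H "Content-Type: application/json" \
  -d '{"name":"Prometheus","type":"prometheus","url":"http://prometheus-service.monitoring.svc:9090","access":"proxy","isDefault":true}'    #returns a JSON body confirming the created data source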

4.6 View the dashboards

Open http://172.24.8.100:30002/ in the browser again.
 

5 Log management

Note: the steps below are abbreviated; for details see 《051.集群管理-日志管理》.

5.1 Get the deployment files

[root@k8smaster01 ~]# git clone https://github.com/kubernetes/kubernetes.git #the EFK addon manifests live under cluster/addons/fluentd-elasticsearch
[root@k8smaster01 ~]# cd kubernetes/cluster/addons/fluentd-elasticsearch/

5.2 Switch image sources

[root@k8smaster01 fluentd-elasticsearch]# sed -i "s/quay.io/quay-mirror.qiniu.com/g" `grep quay.io -rl ./*.yaml`
[root@k8smaster01 fluentd-elasticsearch]# vi es-statefulset.yaml
……
      - image: quay-mirror.qiniu.com/fluentd_elasticsearch/elasticsearch:v7.3.2
        name: elasticsearch-logging
        imagePullPolicy: IfNotPresent    #add an image pull policy
……
 
[root@k8smaster01 fluentd-elasticsearch]# cat fluentd-es-ds.yaml
……
        image: quay-mirror.qiniu.com/fluentd_elasticsearch/fluentd:v2.7.0
        imagePullPolicy: IfNotPresent    #add an image pull policy
……
 
[root@k8smaster01 fluentd-elasticsearch]# cat kibana-deployment.yaml
……
        image: docker.elastic.co/kibana/kibana-oss:7.3.2    #change to the matching version
        imagePullPolicy: IfNotPresent    #add an image pull policy
……
 

5.3 Create a persistent PVC

[root@k8smaster01 fluentd-elasticsearch]# vi elasticsearch-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: elasticsearch-pvc
  namespace: kube-system
  annotations:
    volume.beta.kubernetes.io/storage-class: ghstorageclass
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 5Gi
[root@k8smaster01 fluentd-elasticsearch]# kubectl create -f elasticsearch-pvc.yaml

5.4 Deploy Elasticsearch

[root@k8smaster01 fluentd-elasticsearch]# vi es-statefulset.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elasticsearch-logging
  namespace: kube-system
  labels:
    k8s-app: elasticsearch-logging
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: elasticsearch-logging
  labels:
    k8s-app: elasticsearch-logging
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - "services"
  - "namespaces"
  - "endpoints"
  verbs:
  - "get"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: kube-system
  name: elasticsearch-logging
  labels:
    k8s-app: elasticsearch-logging
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
  name: elasticsearch-logging
  namespace: kube-system
  apiGroup: ""
roleRef:
  kind: ClusterRole
  name: elasticsearch-logging
  apiGroup: ""
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elasticsearch-logging
  namespace: kube-system
  labels:
    k8s-app: elasticsearch-logging
    version: v7.3.2
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  serviceName: elasticsearch-logging
  replicas: 1
  selector:
    matchLabels:
      k8s-app: elasticsearch-logging
      version: v7.3.2
  template:
    metadata:
      labels:
        k8s-app: elasticsearch-logging
        version: v7.3.2
    spec:
      serviceAccountName: elasticsearch-logging
      containers:
      - image: quay-mirror.qiniu.com/fluentd_elasticsearch/elasticsearch:v7.3.2
        name: elasticsearch-logging
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 1000m
            memory: 3Gi
          requests:
            cpu: 100m
            memory: 3Gi
        ports:
        - containerPort: 9200
          name: db
          protocol: TCP
        - containerPort: 9300
          name: transport
          protocol: TCP
        volumeMounts:
        - name: elasticsearch-logging
          mountPath: /data
        env:
        - name: "NAMESPACE"
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
      volumes:
      - name: elasticsearch-logging			#mount the persistent PVC
        persistentVolumeClaim:
          claimName: elasticsearch-pvc
      initContainers:
      - image: alpine:3.6
        command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
        name: elasticsearch-logging-init
        securityContext:
          privileged: true
[root@k8smaster01 fluentd-elasticsearch]# kubectl create -f es-statefulset.yaml

5.5 Deploy the Elasticsearch Service

[root@k8smaster01 fluentd-elasticsearch]# vi es-service.yaml #the upstream defaults are fine
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch-logging
  namespace: kube-system
  labels:
    k8s-app: elasticsearch-logging
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Elasticsearch"
spec:
  ports:
  - port: 9200
    protocol: TCP
    targetPort: db
  selector:
    k8s-app: elasticsearch-logging
[root@k8smaster01 fluentd-elasticsearch]# kubectl create -f es-service.yaml
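Once the Elasticsearch pod is Running, the cluster health endpoint is a quick sanity check; a sketch using a temporary port-forward:

[root@k8smaster01 fluentd-elasticsearch]# kubectl -n kube-system port-forward svc/elasticsearch-logging 9200:9200 &
[root@k8smaster01 fluentd-elasticsearch]# curl -s http://localhost:9200/_cluster/health?pretty    #green or yellow is expected for a single-replica setup
[root@k8smaster01 fluentd-elasticsearch]# kill %1                                                 #stop the port-forward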

5.6 Deploy Fluentd

[root@k8smaster01 fluentd-elasticsearch]# kubectl create -f fluentd-es-configmap.yaml #create the Fluentd ConfigMap

[root@k8smaster01 fluentd-elasticsearch]# kubectl create -f fluentd-es-ds.yaml #deploy Fluentd
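Fluentd runs as a DaemonSet, so one pod is expected per schedulable node; a quick check (the k8s-app=fluentd-es label selector is taken from the addon manifests and may differ slightly by version):

[root@k8smaster01 fluentd-elasticsearch]# kubectl -n kube-system get daemonset | grep fluentd            #DESIRED/READY should match the node count
[root@k8smaster01 fluentd-elasticsearch]# kubectl -n kube-system get pods -l k8s-app=fluentd-es -o wide  #one pod per node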

5.7 Deploy Kibana

[root@k8smaster01 fluentd-elasticsearch]# vi kibana-deployment.yaml #make the following changes
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana-logging
  namespace: kube-system
  labels:
    k8s-app: kibana-logging
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: kibana-logging
  template:
    metadata:
      labels:
        k8s-app: kibana-logging
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      containers:
      - name: kibana-logging
        image: docker.elastic.co/kibana/kibana-oss:7.3.2
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        env:
          - name: ELASTICSEARCH_HOSTS
            value: http://elasticsearch-logging:9200
        ports:
        - containerPort: 5601
          name: ui
          protocol: TCP
[root@k8smaster01 fluentd-elasticsearch]# kubectl create -f kibana-deployment.yaml

5.8 Deploy the Kibana Service

[root@k8smaster01 fluentd-elasticsearch]# vi kibana-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: kibana-logging
  namespace: kube-system
  labels:
    k8s-app: kibana-logging
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "Kibana"
spec:
  type: NodePort
  ports:
  - port: 5601
    protocol: TCP
    nodePort: 30003
    targetPort: ui
  selector:
    k8s-app: kibana-logging
[root@k8smaster01 fluentd-elasticsearch]# kubectl create -f kibana-service.yaml
[root@k8smaster01 fluentd-elasticsearch]# kubectl get pods -n kube-system -o wide | grep -E 'NAME|elasticsearch|fluentd|kibana' #check the related resources

5.9 Verify

Access directly in a browser: http://172.24.8.100:30003/
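In Kibana, create an index pattern matching the Fluentd output (the addon writes logstash-* indices by default) before browsing logs; the indices can be confirmed against Elasticsearch first, reusing the port-forward approach from 5.5 (a sketch):

[root@k8smaster01 fluentd-elasticsearch]# kubectl -n kube-system port-forward svc/elasticsearch-logging 9200:9200 &
[root@k8smaster01 fluentd-elasticsearch]# curl -s http://localhost:9200/_cat/indices?v | grep logstash    #daily logstash-YYYY.MM.DD indices should appear
[root@k8smaster01 fluentd-elasticsearch]# kill %1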
 
