April 27, 2020
Unlike a Deployment, a DaemonSet runs exactly one replica of its Pod on every node, which makes it the right fit for node-level daemon services.
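Structurally, a DaemonSet manifest looks much like a Deployment manifest without a replicas field: the controller creates one Pod per eligible node, and the count tracks the node pool automatically. A minimal sketch for illustration (the node-agent name and busybox image are placeholders, not from this cluster):

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: node-agent              # hypothetical example name
spec:
  selector:
    matchLabels:
      app: node-agent
  template:
    metadata:
      labels:
        app: node-agent         # must match spec.selector
    spec:
      containers:
      - name: agent
        image: busybox:1.31     # placeholder image
        command: ["sh", "-c", "while true; do sleep 3600; done"]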
Inspecting the DaemonSet-based system components (kube-proxy and kube-flannel-ds-amd64)
List the DaemonSets in the kube-system namespace:
[root@k8s01 ~]# kubectl get daemonsets.apps --namespace=kube-system
NAME                      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
kube-flannel-ds-amd64     5         5         5       5            5           <none>                   6d16h
kube-flannel-ds-arm       0         0         0       0            0           <none>                   6d16h
kube-flannel-ds-arm64     0         0         0       0            0           <none>                   6d16h
kube-flannel-ds-ppc64le   0         0         0       0            0           <none>                   6d16h
kube-flannel-ds-s390x     0         0         0       0            0           <none>                   6d16h
kube-proxy                5         5         5       5            5           kubernetes.io/os=linux   6d16h
[root@k8s01 ~]#
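The summary table only shows counts. To see the full spec of any of these DaemonSets, kubectl can describe it or dump it as YAML (resource names taken from the listing above):

kubectl describe daemonset kube-proxy --namespace=kube-system
kubectl get daemonset kube-flannel-ds-amd64 --namespace=kube-system -o yaml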
List the Pods in the kube-system namespace with node details (each DaemonSet runs one replica on every matching node):
[root@k8s01 ~]# kubectl get pods --namespace=kube-system -o wide
NAME                            READY   STATUS    RESTARTS   AGE     IP             NODE    NOMINATED NODE   READINESS GATES
coredns-66bff467f8-5x8nf        1/1     Running   0          6d16h   10.244.1.2     k8s02   <none>           <none>
coredns-66bff467f8-mgcd2        1/1     Running   0          6d16h   10.244.0.2     k8s01   <none>           <none>
etcd-k8s01                      1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-apiserver-k8s01            1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-controller-manager-k8s01   1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-flannel-ds-amd64-4ngbr     1/1     Running   0          6d16h   172.31.6.113   k8s03   <none>           <none>
kube-flannel-ds-amd64-j9qmh     1/1     Running   0          4d      172.31.1.139   k8s04   <none>           <none>
kube-flannel-ds-amd64-kmw29     1/1     Running   0          6d16h   172.31.3.249   k8s02   <none>           <none>
kube-flannel-ds-amd64-l57kp     1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-flannel-ds-amd64-rr8sv     1/1     Running   1          4d      172.31.15.1    k8s05   <none>           <none>
kube-proxy-22fd2                1/1     Running   0          6d16h   172.31.3.249   k8s02   <none>           <none>
kube-proxy-97hft                1/1     Running   0          4d      172.31.1.139   k8s04   <none>           <none>
kube-proxy-jwwp2                1/1     Running   0          6d16h   172.31.6.113   k8s03   <none>           <none>
kube-proxy-mw6xf                1/1     Running   0          4d      172.31.15.1    k8s05   <none>           <none>
kube-proxy-wnf4q                1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
kube-scheduler-k8s01            1/1     Running   0          6d16h   172.31.14.12   k8s01   <none>           <none>
[root@k8s01 ~]#
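To double-check the one-Pod-per-node placement, one option is to project out the NODE column (field 7 in -o wide output) for a single DaemonSet; a quick shell sketch, assuming standard grep/awk:

kubectl get pods --namespace=kube-system -o wide | grep '^kube-proxy' | awk '{print $7}' | sort

Each of the five nodes (k8s01 through k8s05) should appear exactly once.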
Inspect the DaemonSet definition in the flannel manifest (the kube-flannel-ds-amd64 block, lines 134-226 of kube-flannel.yml):
[root@k8s01 ~]# vi kube-flannel.yml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
              - key: kubernetes.io/arch
                operator: In
                values:
                - amd64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
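Two parts of this spec explain the scheduling behavior seen earlier: the nodeAffinity block limits the DaemonSet to linux/amd64 nodes (which is why the arm, arm64, ppc64le, and s390x variants show 0 desired Pods on this cluster), and the blanket NoSchedule toleration lets it land on tainted nodes such as the master. The kubernetes.io/os and kubernetes.io/arch labels are set automatically by the kubelet and can be checked directly:

kubectl get nodes --show-labels
kubectl get nodes -l kubernetes.io/arch=amd64,kubernetes.io/os=linux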
Run a DaemonSet of your own (Fluentd, a log collection agent):
[root@k8s01 ~]# vi daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-elasticsearch
  namespace: kube-system
  labels:
    k8s-app: fluentd-logging
spec:
  selector:
    matchLabels:
      name: fluentd-elasticsearch
  template:
    metadata:
      labels:
        name: fluentd-elasticsearch
    spec:
      tolerations:
      # this toleration is to have the daemonset runnable on master nodes
      # remove it if your masters can't run pods
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: fluentd-elasticsearch
        image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
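The node-role.kubernetes.io/master toleration matters on this cluster because k8s01 is the control-plane node and carries the corresponding NoSchedule taint; without the toleration, the DaemonSet would skip that node. To verify the taint (node name from this cluster, output omitted):

kubectl describe node k8s01 | grep -i taints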
Apply the manifest:
[root@k8s01 ~]# kubectl apply -f daemonset.yaml
daemonset.apps/fluentd-elasticsearch created
[root@k8s01 ~]# kubectl get daemonsets.apps
No resources found in default namespace.
[root@k8s01 ~]# kubectl get daemonsets.apps --namespace=kube-system
NAME                      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
fluentd-elasticsearch     5         5         5       5            5           <none>                   28s
kube-flannel-ds-amd64     5         5         5       5            5           <none>                   6d18h
kube-flannel-ds-arm       0         0         0       0            0           <none>                   6d18h
kube-flannel-ds-arm64     0         0         0       0            0           <none>                   6d18h
kube-flannel-ds-ppc64le   0         0         0       0            0           <none>                   6d18h
kube-flannel-ds-s390x     0         0         0       0            0           <none>                   6d18h
kube-proxy                5         5         5       5            5           kubernetes.io/os=linux   6d18h
[root@k8s01 ~]#
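Since this is an apps/v1 DaemonSet, it supports rolling updates much like a Deployment. A sketch of bumping the image and watching or undoing the rollout (the v2.6.0 tag is a hypothetical example, not verified against the registry):

kubectl set image daemonset/fluentd-elasticsearch fluentd-elasticsearch=quay.io/fluentd_elasticsearch/fluentd:v2.6.0 --namespace=kube-system
kubectl rollout status daemonset/fluentd-elasticsearch --namespace=kube-system
kubectl rollout undo daemonset/fluentd-elasticsearch --namespace=kube-system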
List the Pods in the kube-system namespace again: a fluentd-elasticsearch Pod is now running on every node.
[root@k8s01 ~]# kubectl get pods --namespace=kube-system -o wide
NAME                            READY   STATUS    RESTARTS   AGE     IP             NODE    NOMINATED NODE   READINESS GATES
coredns-66bff467f8-5x8nf        1/1     Running   0          6d18h   10.244.1.2     k8s02   <none>           <none>
coredns-66bff467f8-mgcd2        1/1     Running   0          6d18h   10.244.0.2     k8s01   <none>           <none>
etcd-k8s01                      1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
fluentd-elasticsearch-64c2h     1/1     Running   0          84s     10.244.5.9     k8s05   <none>           <none>
fluentd-elasticsearch-f8989     1/1     Running   0          84s     10.244.0.3     k8s01   <none>           <none>
fluentd-elasticsearch-lcgn7     1/1     Running   0          84s     10.244.3.4     k8s04   <none>           <none>
fluentd-elasticsearch-ss2zm     1/1     Running   0          84s     10.244.1.20    k8s02   <none>           <none>
fluentd-elasticsearch-wkd45     1/1     Running   0          84s     10.244.2.39    k8s03   <none>           <none>
kube-apiserver-k8s01            1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
kube-controller-manager-k8s01   1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
kube-flannel-ds-amd64-4ngbr     1/1     Running   0          6d18h   172.31.6.113   k8s03   <none>           <none>
kube-flannel-ds-amd64-j9qmh     1/1     Running   0          4d2h    172.31.1.139   k8s04   <none>           <none>
kube-flannel-ds-amd64-kmw29     1/1     Running   0          6d18h   172.31.3.249   k8s02   <none>           <none>
kube-flannel-ds-amd64-l57kp     1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
kube-flannel-ds-amd64-rr8sv     1/1     Running   1          4d2h    172.31.15.1    k8s05   <none>           <none>
kube-proxy-22fd2                1/1     Running   0          6d18h   172.31.3.249   k8s02   <none>           <none>
kube-proxy-97hft                1/1     Running   0          4d2h    172.31.1.139   k8s04   <none>           <none>
kube-proxy-jwwp2                1/1     Running   0          6d18h   172.31.6.113   k8s03   <none>           <none>
kube-proxy-mw6xf                1/1     Running   0          4d2h    172.31.15.1    k8s05   <none>           <none>
kube-proxy-wnf4q                1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
kube-scheduler-k8s01            1/1     Running   0          6d18h   172.31.14.12   k8s01   <none>           <none>
[root@k8s01 ~]#
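When you are done with the example, remove the DaemonSet through the same manifest, or equivalently by name:

kubectl delete -f daemonset.yaml
kubectl delete daemonset fluentd-elasticsearch --namespace=kube-system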