#!/bin/bash
#
# Disable SELinux & firewalld
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config;
setenforce 0;
systemctl disable firewalld;
systemctl stop firewalld;

# Add users
useradd ops;
usermod -aG wheel ops;
echo "rancherpwd" | passwd --stdin ops;
useradd deployer;
echo "rancherpwd" | passwd --stdin deployer;

# Use containerd as CRI runtime
# https://v1-19.docs.kubernetes.io/docs/setup/production-environment/container-runtimes/
cat <<EOF | tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
modprobe overlay;
modprobe br_netfilter;

cat <<EOF | tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sudo sysctl --system;

# Install Docker CE
# https://docs.docker.com/engine/install/centos/
# dnf list docker-ce --showduplicates | sort -r
dnf makecache;
yum install -y yum-utils device-mapper-persistent-data lvm2 iptables;
yum-config-manager \
  --add-repo https://download.docker.com/linux/centos/docker-ce.repo;
dnf makecache;
yum -y install docker-ce-19.03.15 docker-ce-cli-19.03.15 containerd.io;

# Configure the Docker daemon
# https://kubernetes.io/docs/setup/production-environment/container-runtimes/#docker
mkdir /etc/docker;

# Set up the Docker daemon
# http://mirrors.ustc.edu.cn/help/dockerhub.html
# https://help.aliyun.com/document_detail/60750.html
cat <<EOF | tee /etc/docker/daemon.json
{
  "registry-mirrors": ["https://o9w8d6uk.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF

# Restart Docker and enable on boot
systemctl enable docker;
systemctl daemon-reload;
systemctl start docker;
# usermod -aG docker deployer;
In a Kubernetes cluster, a Service logically represents a group of Pods and is associated with them through labels.
Prepare the Deployment manifest
[root@k8s-01 ~]# vi httpd-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpd
spec:
  replicas: 3
  selector:
    matchLabels:
      run: httpd
  template:
    metadata:
      labels:
        run: httpd
    spec:
      containers:
      - name: httpd
        image: httpd:2.4.41
        ports:
        - containerPort: 80
[root@k8s-01 ~]# kubectl apply -f httpd-deployment.yaml
deployment.apps/httpd created
[root@k8s-01 ~]#
List the Pods in the cluster with details
[root@k8s-01 ~]# kubectl get pods -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES httpd-5bb8cdb99c-g5m95 1/1 Running 0 4m29s 10.244.2.3 k8s-03 <none> <none> httpd-5bb8cdb99c-hzjqd 1/1 Running 0 4m29s 10.244.1.3 k8s-02 <none> <none> httpd-5bb8cdb99c-s4q25 1/1 Running 0 4m29s 10.244.1.4 k8s-02 <none> <none> [root@k8s-01 ~]#
Use curl to simulate a browser request against the Pod IP addresses (Pod IPs are reachable only from containers and nodes inside the cluster)
[root@k8s-01 ~]# curl 10.244.2.3 <html><body><h1>It works!</h1></body></html> [root@k8s-01 ~]# curl 10.244.1.3 <html><body><h1>It works!</h1></body></html> [root@k8s-01 ~]# curl 10.244.1.4 <html><body><h1>It works!</h1></body></html> [root@k8s-01 ~]# [root@k8s-02 ~]# curl 10.244.2.3 <html><body><h1>It works!</h1></body></html> [root@k8s-02 ~]# curl 10.244.1.3 <html><body><h1>It works!</h1></body></html> [root@k8s-02 ~]# curl 10.244.1.4 <html><body><h1>It works!</h1></body></html> [root@k8s-02 ~]# [root@k8s-03 ~]# curl 10.244.2.3 <html><body><h1>It works!</h1></body></html> [root@k8s-03 ~]# curl 10.244.1.3 <html><body><h1>It works!</h1></body></html> [root@k8s-03 ~]# curl 10.244.1.4 <html><body><h1>It works!</h1></body></html> [root@k8s-03 ~]#
Ping the Pod IPs
[root@k8s-01 ~]# ping -c 2 10.244.2.3 PING 10.244.2.3 (10.244.2.3) 56(84) bytes of data. 64 bytes from 10.244.2.3: icmp_seq=1 ttl=63 time=2.03 ms 64 bytes from 10.244.2.3: icmp_seq=2 ttl=63 time=0.660 ms --- 10.244.2.3 ping statistics --- 2 packets transmitted, 2 received, 0% packet loss, time 1001ms rtt min/avg/max/mdev = 0.660/1.348/2.036/0.688 ms [root@k8s-01 ~]# ping -c 2 10.244.1.3 PING 10.244.1.3 (10.244.1.3) 56(84) bytes of data. 64 bytes from 10.244.1.3: icmp_seq=1 ttl=63 time=1.58 ms 64 bytes from 10.244.1.3: icmp_seq=2 ttl=63 time=0.641 ms --- 10.244.1.3 ping statistics --- 2 packets transmitted, 2 received, 0% packet loss, time 1001ms rtt min/avg/max/mdev = 0.641/1.115/1.589/0.474 ms [root@k8s-01 ~]# ping -c 2 10.244.1.4 PING 10.244.1.4 (10.244.1.4) 56(84) bytes of data. 64 bytes from 10.244.1.4: icmp_seq=1 ttl=63 time=0.658 ms 64 bytes from 10.244.1.4: icmp_seq=2 ttl=63 time=0.483 ms --- 10.244.1.4 ping statistics --- 2 packets transmitted, 2 received, 0% packet loss, time 1000ms rtt min/avg/max/mdev = 0.483/0.570/0.658/0.090 ms [root@k8s-01 ~]#
Create the Service manifest
[root@k8s-01 ~]# vi httpd-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: httpd-service
spec:
  selector:
    run: httpd
  ports:
  - protocol: TCP
    port: 8080
    targetPort: 80
[root@k8s-01 ~]# kubectl apply -f httpd-service.yaml
service/httpd-service created
[root@k8s-01 ~]#
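The same Service can also be created imperatively instead of from a manifest. A minimal sketch, assuming the httpd Deployment above already exists (note that the Service created this way would be named httpd, after the Deployment, rather than httpd-service):

# Sketch: imperative equivalent of the manifest above.
kubectl expose deployment httpd --port=8080 --target-port=80 --type=ClusterIP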
List the Services in the cluster with details
[root@k8s-01 ~]# kubectl get services -o wide NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR httpd-service ClusterIP 10.109.145.140 <none> 8080/TCP 4m9s run=httpd kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 10m <none> [root@k8s-01 ~]#
Try to ping the cluster IP (by default it cannot be pinged)
[root@k8s-01 ~]# ping 10.109.145.140 PING 10.109.145.140 (10.109.145.140) 56(84) bytes of data. ^C --- 10.109.145.140 ping statistics --- 3 packets transmitted, 0 received, 100% packet loss, time 1999ms [root@k8s-01 ~]#
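The cluster IP is a virtual address: no network interface answers ICMP for it. kube-proxy implements it with NAT rules on every node. A quick way to see this, assuming kube-proxy is running in its default iptables mode, is to look for the Service address in the KUBE-SERVICES chain:

# Sketch: show the NAT rules kube-proxy programmed for the httpd-service cluster IP.
iptables -t nat -L KUBE-SERVICES -n | grep 10.109.145.140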
Use the Service's cluster IP to access the backend Pods (and their containers) labeled run=httpd
[root@k8s-01 ~]# curl 10.109.145.140:8080 <html><body><h1>It works!</h1></body></html> [root@k8s-01 ~]# curl 10.109.145.140:8080 <html><body><h1>It works!</h1></body></html> [root@k8s-01 ~]# curl 10.109.145.140:8080 <html><body><h1>It works!</h1></body></html> [root@k8s-01 ~]# curl -I 10.109.145.140:8080 HTTP/1.1 200 OK Date: Wed, 06 May 2020 07:24:57 GMT Server: Apache/2.4.41 (Unix) Last-Modified: Mon, 11 Jun 2007 18:53:14 GMT ETag: "2d-432a5e4a73a80" Accept-Ranges: bytes Content-Length: 45 Content-Type: text/html [root@k8s-01 ~]#
Describe the Service to confirm which backend Pod IPs the cluster IP points to
[root@k8s-01 ~]# kubectl describe services httpd-service Name: httpd-service Namespace: default Labels: <none> Annotations: kubectl.kubernetes.io/last-applied-configuration: {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"name":"httpd-service","namespace":"default"},"spec":{"ports":[{"port":80... Selector: run=httpd Type: ClusterIP IP: 10.109.145.140 Port: <unset> 8080/TCP TargetPort: 80/TCP Endpoints: 10.244.1.3:80,10.244.1.4:80,10.244.2.3:80 Session Affinity: None Events: <none> [root@k8s-01 ~]# [root@k8s-01 ~]# kubectl get endpoints httpd-service NAME ENDPOINTS AGE httpd-service 10.244.1.3:80,10.244.1.4:80,10.244.2.3:80 5m23s [root@k8s-01 ~]#
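Inside the cluster the Service is also reachable by its DNS name. A quick check from a throwaway Pod, assuming the default CoreDNS setup (the busybox image and tag here are only an example):

# Sketch: resolve and call the Service by name from a temporary Pod.
kubectl run dns-test --rm -it --image=busybox:1.28 --restart=Never -- \
    wget -qO- http://httpd-service.default.svc.cluster.local:8080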
Unlike a Deployment, a DaemonSet runs exactly one replica on every node, which makes it suitable for node-level daemon services.
Inspect the DaemonSet-based system components (kube-proxy and kube-flannel-ds-amd64)
List the DaemonSets in the kube-system namespace
[root@k8s01 ~]# kubectl get daemonsets.apps --namespace=kube-system NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE kube-flannel-ds-amd64 5 5 5 5 5 <none> 6d16h kube-flannel-ds-arm 0 0 0 0 0 <none> 6d16h kube-flannel-ds-arm64 0 0 0 0 0 <none> 6d16h kube-flannel-ds-ppc64le 0 0 0 0 0 <none> 6d16h kube-flannel-ds-s390x 0 0 0 0 0 <none> 6d16h kube-proxy 5 5 5 5 5 kubernetes.io/os=linux 6d16h [root@k8s01 ~]#
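To see which Pods a particular DaemonSet manages, filter by its label selector. A short sketch for kube-proxy (kubeadm labels those Pods k8s-app=kube-proxy):

kubectl get pods -n kube-system -l k8s-app=kube-proxy -o wide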
List the Pods in the kube-system namespace with details (each node runs one replica of every DaemonSet)
[root@k8s01 ~]# kubectl get pods --namespace=kube-system -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES coredns-66bff467f8-5x8nf 1/1 Running 0 6d16h 10.244.1.2 k8s02 <none> <none> coredns-66bff467f8-mgcd2 1/1 Running 0 6d16h 10.244.0.2 k8s01 <none> <none> etcd-k8s01 1/1 Running 0 6d16h 172.31.14.12 k8s01 <none> <none> kube-apiserver-k8s01 1/1 Running 0 6d16h 172.31.14.12 k8s01 <none> <none> kube-controller-manager-k8s01 1/1 Running 0 6d16h 172.31.14.12 k8s01 <none> <none> kube-flannel-ds-amd64-4ngbr 1/1 Running 0 6d16h 172.31.6.113 k8s03 <none> <none> kube-flannel-ds-amd64-j9qmh 1/1 Running 0 4d 172.31.1.139 k8s04 <none> <none> kube-flannel-ds-amd64-kmw29 1/1 Running 0 6d16h 172.31.3.249 k8s02 <none> <none> kube-flannel-ds-amd64-l57kp 1/1 Running 0 6d16h 172.31.14.12 k8s01 <none> <none> kube-flannel-ds-amd64-rr8sv 1/1 Running 1 4d 172.31.15.1 k8s05 <none> <none> kube-proxy-22fd2 1/1 Running 0 6d16h 172.31.3.249 k8s02 <none> <none> kube-proxy-97hft 1/1 Running 0 4d 172.31.1.139 k8s04 <none> <none> kube-proxy-jwwp2 1/1 Running 0 6d16h 172.31.6.113 k8s03 <none> <none> kube-proxy-mw6xf 1/1 Running 0 4d 172.31.15.1 k8s05 <none> <none> kube-proxy-wnf4q 1/1 Running 0 6d16h 172.31.14.12 k8s01 <none> <none> kube-scheduler-k8s01 1/1 Running 0 6d16h 172.31.14.12 k8s01 <none> <none> [root@k8s01 ~]#
Inspect the DaemonSet definition in the flannel network component manifest
[root@k8s01 ~]# vi kube-flannel.yml 134 apiVersion: apps/v1 135 kind: DaemonSet 136 metadata: 137 name: kube-flannel-ds-amd64 138 namespace: kube-system 139 labels: 140 tier: node 141 app: flannel 142 spec: 143 selector: 144 matchLabels: 145 app: flannel 146 template: 147 metadata: 148 labels: 149 tier: node 150 app: flannel 151 spec: 152 affinity: 153 nodeAffinity: 154 requiredDuringSchedulingIgnoredDuringExecution: 155 nodeSelectorTerms: 156 - matchExpressions: 157 - key: kubernetes.io/os 158 operator: In 159 values: 160 - linux 161 - key: kubernetes.io/arch 162 operator: In 163 values: 164 - amd64 165 hostNetwork: true 166 tolerations: 167 - operator: Exists 168 effect: NoSchedule 169 serviceAccountName: flannel 170 initContainers: 171 - name: install-cni 172 image: quay.io/coreos/flannel:v0.12.0-amd64 173 command: 174 - cp 175 args: 176 - -f 177 - /etc/kube-flannel/cni-conf.json 178 - /etc/cni/net.d/10-flannel.conflist 179 volumeMounts: 180 - name: cni 181 mountPath: /etc/cni/net.d 182 - name: flannel-cfg 183 mountPath: /etc/kube-flannel/ 184 containers: 185 - name: kube-flannel 186 image: quay.io/coreos/flannel:v0.12.0-amd64 187 command: 188 - /opt/bin/flanneld 189 args: 190 - --ip-masq 191 - --kube-subnet-mgr 192 resources: 193 requests: 194 cpu: "100m" 195 memory: "50Mi" 196 limits: 197 cpu: "100m" 198 memory: "50Mi" 199 securityContext: 200 privileged: false 201 capabilities: 202 add: ["NET_ADMIN"] 203 env: 204 - name: POD_NAME 205 valueFrom: 206 fieldRef: 207 fieldPath: metadata.name 208 - name: POD_NAMESPACE 209 valueFrom: 210 fieldRef: 211 fieldPath: metadata.namespace 212 volumeMounts: 213 - name: run 214 mountPath: /run/flannel 215 - name: flannel-cfg 216 mountPath: /etc/kube-flannel/ 217 volumes: 218 - name: run 219 hostPath: 220 path: /run/flannel 221 - name: cni 222 hostPath: 223 path: /etc/cni/net.d 224 - name: flannel-cfg 225 configMap: 226 name: kube-flannel-cfg
Run a DaemonSet resource of your own (the Fluentd log collector)
[root@k8s01 ~]# vi daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-elasticsearch
  namespace: kube-system
  labels:
    k8s-app: fluentd-logging
spec:
  selector:
    matchLabels:
      name: fluentd-elasticsearch
  template:
    metadata:
      labels:
        name: fluentd-elasticsearch
    spec:
      tolerations:
      # this toleration is to have the daemonset runnable on master nodes
      # remove it if your masters can't run pods
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: fluentd-elasticsearch
        image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
Apply the manifest
[root@k8s01 ~]# kubectl apply -f daemonset.yaml daemonset.apps/fluentd-elasticsearch created [root@k8s01 ~]# kubectl get daemonsets.apps No resources found in default namespace. [root@k8s01 ~]# kubectl get daemonsets.apps --namespace=kube-system NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE fluentd-elasticsearch 5 5 5 5 5 <none> 28s kube-flannel-ds-amd64 5 5 5 5 5 <none> 6d18h kube-flannel-ds-arm 0 0 0 0 0 <none> 6d18h kube-flannel-ds-arm64 0 0 0 0 0 <none> 6d18h kube-flannel-ds-ppc64le 0 0 0 0 0 <none> 6d18h kube-flannel-ds-s390x 0 0 0 0 0 <none> 6d18h kube-proxy 5 5 5 5 5 kubernetes.io/os=linux 6d18h [root@k8s01 ~]#
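Before checking the Pods node by node, you can wait for the DaemonSet rollout to finish and then list its Pods by label; a short sketch:

kubectl -n kube-system rollout status daemonset/fluentd-elasticsearch
kubectl -n kube-system get pods -l name=fluentd-elasticsearch -o wide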
List the Pods in the kube-system namespace again (one fluentd-elasticsearch Pod is now running on every node)
[root@k8s01 ~]# kubectl get pods --namespace=kube-system -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES coredns-66bff467f8-5x8nf 1/1 Running 0 6d18h 10.244.1.2 k8s02 <none> <none> coredns-66bff467f8-mgcd2 1/1 Running 0 6d18h 10.244.0.2 k8s01 <none> <none> etcd-k8s01 1/1 Running 0 6d18h 172.31.14.12 k8s01 <none> <none> fluentd-elasticsearch-64c2h 1/1 Running 0 84s 10.244.5.9 k8s05 <none> <none> fluentd-elasticsearch-f8989 1/1 Running 0 84s 10.244.0.3 k8s01 <none> <none> fluentd-elasticsearch-lcgn7 1/1 Running 0 84s 10.244.3.4 k8s04 <none> <none> fluentd-elasticsearch-ss2zm 1/1 Running 0 84s 10.244.1.20 k8s02 <none> <none> fluentd-elasticsearch-wkd45 1/1 Running 0 84s 10.244.2.39 k8s03 <none> <none> kube-apiserver-k8s01 1/1 Running 0 6d18h 172.31.14.12 k8s01 <none> <none> kube-controller-manager-k8s01 1/1 Running 0 6d18h 172.31.14.12 k8s01 <none> <none> kube-flannel-ds-amd64-4ngbr 1/1 Running 0 6d18h 172.31.6.113 k8s03 <none> <none> kube-flannel-ds-amd64-j9qmh 1/1 Running 0 4d2h 172.31.1.139 k8s04 <none> <none> kube-flannel-ds-amd64-kmw29 1/1 Running 0 6d18h 172.31.3.249 k8s02 <none> <none> kube-flannel-ds-amd64-l57kp 1/1 Running 0 6d18h 172.31.14.12 k8s01 <none> <none> kube-flannel-ds-amd64-rr8sv 1/1 Running 1 4d2h 172.31.15.1 k8s05 <none> <none> kube-proxy-22fd2 1/1 Running 0 6d18h 172.31.3.249 k8s02 <none> <none> kube-proxy-97hft 1/1 Running 0 4d2h 172.31.1.139 k8s04 <none> <none> kube-proxy-jwwp2 1/1 Running 0 6d18h 172.31.6.113 k8s03 <none> <none> kube-proxy-mw6xf 1/1 Running 0 4d2h 172.31.15.1 k8s05 <none> <none> kube-proxy-wnf4q 1/1 Running 0 6d18h 172.31.14.12 k8s01 <none> <none> kube-scheduler-k8s01 1/1 Running 0 6d18h 172.31.14.12 k8s01 <none> <none> [root@k8s01 ~]#
List the current Pods in the cluster and the nodes they run on
[root@k8s01 ~]# kubectl get pods -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES nginx-deployment-bbfdbf4b7-8khd4 1/1 Running 0 3d23h 10.244.2.35 k8s03 <none> <none> nginx-deployment-bbfdbf4b7-9g825 1/1 Running 0 3d23h 10.244.1.17 k8s02 <none> <none> nginx-deployment-bbfdbf4b7-hsvfg 1/1 Running 0 3d23h 10.244.2.36 k8s03 <none> <none> nginx-deployment-bbfdbf4b7-jpt96 1/1 Running 0 3d23h 10.244.2.34 k8s03 <none> <none> nginx-deployment-bbfdbf4b7-vlnlk 1/1 Running 0 3d23h 10.244.1.18 k8s02 <none> <none> [root@k8s01 ~]# kubectl get deployments NAME READY UP-TO-DATE AVAILABLE AGE nginx-deployment 5/5 5 5 5d15h [root@k8s01 ~]#
Delete the nginx-deployment resource
[root@k8s01 ~]# kubectl delete deployments.apps nginx-deployment deployment.apps "nginx-deployment" deleted [root@k8s01 ~]# kubectl get pods No resources found in default namespace. [root@k8s01 ~]#
List the nodes
[root@k8s01 ~]# kubectl get nodes NAME STATUS ROLES AGE VERSION k8s01 Ready master 6d15h v1.18.2 k8s02 Ready <none> 6d15h v1.18.2 k8s03 Ready <none> 6d15h v1.18.2 k8s04 Ready <none> 3d23h v1.18.2 k8s05 Ready <none> 3d23h v1.18.2 [root@k8s01 ~]#
Apply the nginx-deployment manifest
[root@k8s01 ~]# cat nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.17.10
        ports:
        - containerPort: 80
[root@k8s01 ~]# kubectl apply -f nginx-deployment.yaml
deployment.apps/nginx-deployment created
[root@k8s01 ~]# kubectl get pods -o wide
NAME                               READY   STATUS    RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
nginx-deployment-cc5db57d4-dvr4p   1/1     Running   0          11s   10.244.2.37   k8s03   <none>           <none>
nginx-deployment-cc5db57d4-fnq9c   1/1     Running   0          11s   10.244.3.2    k8s04   <none>           <none>
[root@k8s01 ~]#
Show the default labels on each node
[root@k8s01 ~]# kubectl get nodes --show-labels NAME STATUS ROLES AGE VERSION LABELS k8s01 Ready master 6d15h v1.18.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s01,kubernetes.io/os=linux,node-role.kubernetes.io/master= k8s02 Ready <none> 6d15h v1.18.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s02,kubernetes.io/os=linux k8s03 Ready <none> 6d15h v1.18.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s03,kubernetes.io/os=linux k8s04 Ready <none> 3d23h v1.18.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s04,kubernetes.io/os=linux k8s05 Ready <none> 3d23h v1.18.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s05,kubernetes.io/os=linux [root@k8s01 ~]#
Add a label key/value pair to a specific node
[root@k8s01 ~]# kubectl label nodes k8s05 disktype=ssd node/k8s05 labeled [root@k8s01 ~]# kubectl get nodes --show-labels NAME STATUS ROLES AGE VERSION LABELS k8s01 Ready master 6d15h v1.18.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s01,kubernetes.io/os=linux,node-role.kubernetes.io/master= k8s02 Ready <none> 6d15h v1.18.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s02,kubernetes.io/os=linux k8s03 Ready <none> 6d15h v1.18.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s03,kubernetes.io/os=linux k8s04 Ready <none> 3d23h v1.18.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s04,kubernetes.io/os=linux k8s05 Ready <none> 3d23h v1.18.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disktype=ssd,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s05,kubernetes.io/os=linux [root@k8s01 ~]#
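To confirm the label took effect without scanning the full label list, filter on it or project it as a column, for example:

# Show only the nodes carrying the new label.
kubectl get nodes -l disktype=ssd
# Or print the label value as an extra column for every node.
kubectl get nodes -L disktype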
Edit the Deployment manifest to add a nodeSelector matching the new label
[root@k8s01 ~]# vi nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 6
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.17.10
        ports:
        - containerPort: 80
      nodeSelector:
        disktype: ssd
Apply the manifest: the existing Pods are destroyed and the new Pods are scheduled onto node k8s05
[root@k8s01 ~]# kubectl get pods -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES nginx-deployment-cc5db57d4-5lzsz 1/1 Running 0 12s 10.244.3.3 k8s04 <none> <none> nginx-deployment-cc5db57d4-dvr4p 1/1 Running 0 9m53s 10.244.2.37 k8s03 <none> <none> nginx-deployment-cc5db57d4-fnq9c 1/1 Running 0 9m53s 10.244.3.2 k8s04 <none> <none> nginx-deployment-cc5db57d4-hwmk4 1/1 Running 0 12s 10.244.1.19 k8s02 <none> <none> nginx-deployment-cc5db57d4-qt26r 1/1 Running 0 12s 10.244.2.38 k8s03 <none> <none> nginx-deployment-ddc6847d-4qx2m 0/1 ContainerCreating 0 12s <none> k8s05 <none> <none> nginx-deployment-ddc6847d-cvhv4 0/1 ContainerCreating 0 12s <none> k8s05 <none> <none> nginx-deployment-ddc6847d-dcztn 0/1 ContainerCreating 0 12s <none> k8s05 <none> <none> [root@k8s01 ~]# kubectl get pods -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES nginx-deployment-cc5db57d4-dvr4p 0/1 Terminating 0 10m 10.244.2.37 k8s03 <none> <none> nginx-deployment-cc5db57d4-fnq9c 0/1 Terminating 0 10m 10.244.3.2 k8s04 <none> <none> nginx-deployment-ddc6847d-26hl9 1/1 Running 0 13s 10.244.5.7 k8s05 <none> <none> nginx-deployment-ddc6847d-4qx2m 1/1 Running 0 26s 10.244.5.3 k8s05 <none> <none> nginx-deployment-ddc6847d-cvhv4 1/1 Running 0 26s 10.244.5.4 k8s05 <none> <none> nginx-deployment-ddc6847d-d6f99 1/1 Running 0 14s 10.244.5.6 k8s05 <none> <none> nginx-deployment-ddc6847d-dcztn 1/1 Running 0 26s 10.244.5.5 k8s05 <none> <none> nginx-deployment-ddc6847d-dj5x4 1/1 Running 0 12s 10.244.5.8 k8s05 <none> <none> [root@k8s01 ~]# kubectl get pods -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES nginx-deployment-ddc6847d-26hl9 1/1 Running 0 21s 10.244.5.7 k8s05 <none> <none> nginx-deployment-ddc6847d-4qx2m 1/1 Running 0 34s 10.244.5.3 k8s05 <none> <none> nginx-deployment-ddc6847d-cvhv4 1/1 Running 0 34s 10.244.5.4 k8s05 <none> <none> nginx-deployment-ddc6847d-d6f99 1/1 Running 0 22s 10.244.5.6 k8s05 <none> <none> nginx-deployment-ddc6847d-dcztn 1/1 Running 0 34s 10.244.5.5 k8s05 <none> <none> nginx-deployment-ddc6847d-dj5x4 1/1 Running 0 20s 10.244.5.8 k8s05 <none> <none> [root@k8s01 ~]#
Remove the label from the node
[root@k8s01 ~]# kubectl label nodes k8s05 disktype- node/k8s05 labeled [root@k8s01 ~]# kubectl get nodes --show-labels NAME STATUS ROLES AGE VERSION LABELS k8s01 Ready master 6d15h v1.18.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s01,kubernetes.io/os=linux,node-role.kubernetes.io/master= k8s02 Ready <none> 6d15h v1.18.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s02,kubernetes.io/os=linux k8s03 Ready <none> 6d15h v1.18.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s03,kubernetes.io/os=linux k8s04 Ready <none> 3d23h v1.18.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s04,kubernetes.io/os=linux k8s05 Ready <none> 3d23h v1.18.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s05,kubernetes.io/os=linux [root@k8s01 ~]#
Add new nodes to the cluster
172.31.3.209 k8s01
172.31.8.132 k8s02
172.31.10.229 k8s03
172.31.1.139 k8s04
172.31.15.1 k8s05
Join the new nodes to the cluster
kubeadm join --token <token> <control-plane-host>:<control-plane-port> --discovery-token-ca-cert-hash sha256:<hash>
Tokens generated on the master are valid for 24 hours; once a token expires, a new one must be created.
List the existing tokens
[root@k8s01 ~]# kubeadm token list
TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS
ca673s.97ektx8klpsjfovt 8h 2020-04-23T10:35:25Z authentication,signing <none> system:bootstrappers:kubeadm:default-node-token
qxycbf.ri8i2zygahp5je8m 8h 2020-04-23T10:35:43Z authentication,signing <none> system:bootstrappers:kubeadm:default-node-token
[root@k8s01 ~]#
Generate a new token
[root@k8s01 ~]# kubeadm token create
W0423 02:26:28.166475 9469 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
lf1qej.q4wq7xo23xigg672
[root@k8s01 ~]# kubeadm token list
TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS
ca673s.97ektx8klpsjfovt 8h 2020-04-23T10:35:25Z authentication,signing <none> system:bootstrappers:kubeadm:default-node-token
lf1qej.q4wq7xo23xigg672 23h 2020-04-24T02:26:28Z authentication,signing <none> system:bootstrappers:kubeadm:default-node-token
qxycbf.ri8i2zygahp5je8m 8h 2020-04-23T10:35:43Z authentication,signing <none> system:bootstrappers:kubeadm:default-node-token
[root@k8s01 ~]#
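If the default 24-hour lifetime is not suitable, the TTL can be set when the token is created; a short sketch (a TTL of 0 creates a token that never expires, which should be used with care):

kubeadm token create --ttl 48h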
Regenerate the CA certificate hash (this value does not change)
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \
openssl dgst -sha256 -hex | sed 's/^.* //'
[root@k8s01 ~]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \
> openssl dgst -sha256 -hex | sed 's/^.* //'
d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
[root@k8s01 ~]#
Join node 4
[root@k8s04 ~]# kubeadm join --token lf1qej.q4wq7xo23xigg672 172.31.14.12:6443 --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
W0423 02:28:44.283472 19177 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[root@k8s04 ~]#
Join node 5
[root@k8s05 ~]# kubeadm join --token lf1qej.q4wq7xo23xigg672 172.31.14.12:6443 --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
W0423 02:28:51.716851 19271 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[root@k8s05 ~]#
List the nodes (the join succeeded)
[root@k8s01 ~]# kubectl get nodes -o wide NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME k8s01 Ready master 2d16h v1.18.2 172.31.14.12 <none> CentOS Linux 7 (Core) 3.10.0-1062.12.1.el7.x86_64 docker://19.3.8 k8s02 Ready <none> 2d16h v1.18.2 172.31.3.249 <none> CentOS Linux 7 (Core) 3.10.0-1062.12.1.el7.x86_64 docker://19.3.8 k8s03 Ready <none> 2d16h v1.18.2 172.31.6.113 <none> CentOS Linux 7 (Core) 3.10.0-1062.12.1.el7.x86_64 docker://19.3.8 k8s04 Ready <none> 78s v1.18.2 172.31.1.139 <none> CentOS Linux 7 (Core) 3.10.0-1062.12.1.el7.x86_64 docker://19.3.8 k8s05 Ready <none> 70s v1.18.2 172.31.15.1 <none> CentOS Linux 7 (Core) 3.10.0-1062.12.1.el7.x86_64 docker://19.3.8 [root@k8s01 ~]#
Create a new token and print the complete join command in one step
[root@k8s01 ~]# kubeadm token list TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS ca673s.97ektx8klpsjfovt 7h 2020-04-23T10:35:25Z authentication,signing <none> system:bootstrappers:kubeadm:default-node-token lf1qej.q4wq7xo23xigg672 23h 2020-04-24T02:26:28Z authentication,signing <none> system:bootstrappers:kubeadm:default-node-token qxycbf.ri8i2zygahp5je8m 7h 2020-04-23T10:35:43Z authentication,signing <none> system:bootstrappers:kubeadm:default-node-token [root@k8s01 ~]# kubeadm token create --print-join-command W0423 02:41:47.487117 15377 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io] kubeadm join 172.31.14.12:6443 --token vc6toc.jhhp9jatexn4ed7m --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859 [root@k8s01 ~]# kubeadm token list TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS ca673s.97ektx8klpsjfovt 7h 2020-04-23T10:35:25Z authentication,signing <none> system:bootstrappers:kubeadm:default-node-token lf1qej.q4wq7xo23xigg672 23h 2020-04-24T02:26:28Z authentication,signing <none> system:bootstrappers:kubeadm:default-node-token qxycbf.ri8i2zygahp5je8m 7h 2020-04-23T10:35:43Z authentication,signing <none> system:bootstrappers:kubeadm:default-node-token vc6toc.jhhp9jatexn4ed7m 23h 2020-04-24T02:41:47Z authentication,signing <none> system:bootstrappers:kubeadm:default-node-token [root@k8s01 ~]#
List the Pods and the nodes they run on
[root@k8s01 ~]# kubectl get pods -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES nginx-deployment-cc5db57d4-5q9lz 1/1 Running 0 22h 10.244.2.17 k8s03 <none> <none> nginx-deployment-cc5db57d4-dncbs 1/1 Running 0 22h 10.244.1.10 k8s02 <none> <none> nginx-deployment-cc5db57d4-gsp6l 1/1 Running 0 22h 10.244.2.16 k8s03 <none> <none> [root@k8s01 ~]#
Change the replica count to 5 and re-apply the Deployment manifest (scale out)
[root@k8s01 ~]# vi nginx-deployment.yaml replicas: 5 [root@k8s01 ~]# kubectl apply -f nginx-deployment.yaml deployment.apps/nginx-deployment configured [root@k8s01 ~]# kubectl get pods -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES nginx-deployment-cc5db57d4-5q9lz 1/1 Running 0 23h 10.244.2.17 k8s03 <none> <none> nginx-deployment-cc5db57d4-clrlh 1/1 Running 0 9s 10.244.2.18 k8s03 <none> <none> nginx-deployment-cc5db57d4-dncbs 1/1 Running 0 23h 10.244.1.10 k8s02 <none> <none> nginx-deployment-cc5db57d4-gsp6l 1/1 Running 0 23h 10.244.2.16 k8s03 <none> <none> nginx-deployment-cc5db57d4-ndkr7 1/1 Running 0 9s 10.244.1.11 k8s02 <none> <none> [root@k8s01 ~]#
Change the replica count to 2 and re-apply the Deployment manifest (scale in)
[root@k8s01 ~]# vi nginx-deployment.yaml replicas: 2 [root@k8s01 ~]# kubectl apply -f nginx-deployment.yaml deployment.apps/nginx-deployment configured [root@k8s01 ~]# kubectl get pods -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES nginx-deployment-cc5db57d4-clrlh 0/1 Terminating 0 4m50s 10.244.2.18 k8s03 <none> <none> nginx-deployment-cc5db57d4-dncbs 1/1 Running 0 23h 10.244.1.10 k8s02 <none> <none> nginx-deployment-cc5db57d4-gsp6l 0/1 Terminating 0 23h 10.244.2.16 k8s03 <none> <none> nginx-deployment-cc5db57d4-ndkr7 1/1 Running 0 4m50s 10.244.1.11 k8s02 <none> <none> [root@k8s01 ~]# [root@k8s01 ~]# kubectl get pods -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES nginx-deployment-cc5db57d4-dncbs 1/1 Running 0 23h 10.244.1.10 k8s02 <none> <none> nginx-deployment-cc5db57d4-ndkr7 1/1 Running 0 22m 10.244.1.11 k8s02 <none> <none> [root@k8s01 ~]#
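The same scale-out/scale-in can be done imperatively without editing the manifest, for example as below; note that the next kubectl apply of the file would set the replica count back to whatever the manifest says.

kubectl scale deployment nginx-deployment --replicas=2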
Base environment installation script (for CentOS 7 on Amazon AWS EC2)
#!/bin/bash
#
setenforce 0;
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config;
#
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl -p;
#
yum makecache;
yum install -y yum-utils;
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo;
yum install -y docker-ce docker-ce-cli containerd.io;
#
cat <<EOF >> /etc/hosts
172.31.3.209 k8s01
172.31.8.132 k8s02
172.31.10.229 k8s03
EOF
#
mkdir /etc/docker;
cat <<EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
#
systemctl daemon-reload;
systemctl enable docker;
systemctl restart docker;
#
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
#
yum install -y kubectl kubelet kubeadm;
systemctl enable kubelet;
Run the script
[root@k8s01 ~]# vi deploy.sh
[root@k8s01 ~]# chmod 700 deploy.sh
[root@k8s01 ~]# ./deploy.sh
Initialize the master node
kubeadm init --apiserver-advertise-address=172.31.14.12 --pod-network-cidr=10.244.0.0/16

[root@k8s01 ~]# kubeadm init --apiserver-advertise-address=172.31.14.12 --pod-network-cidr=10.244.0.0/16
Configure the local kubectl environment
[root@k8s01 ~]# mkdir -p $HOME/.kube
[root@k8s01 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s01 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
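A quick sanity check that kubectl can now talk to the new control plane:

kubectl cluster-info
kubectl get nodes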
Join the worker nodes to the cluster
kubeadm join 172.31.14.12:6443 --token ghr4s0.13nh5q6f6ywt2oso \
    --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
Join node 2
[root@k8s02 ~]# kubeadm join 172.31.14.12:6443 --token ghr4s0.13nh5q6f6ywt2oso \ > --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859 W0420 10:23:48.432125 9198 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set. [preflight] Running pre-flight checks [preflight] Reading configuration from the cluster... [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' [kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" [kubelet-start] Starting the kubelet [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap... This node has joined the cluster: * Certificate signing request was sent to apiserver and a response was received. * The Kubelet was informed of the new secure connection details. Run 'kubectl get nodes' on the control-plane to see this node join the cluster. [root@k8s02 ~]#
Join node 3
[root@k8s03 ~]# kubeadm join 172.31.14.12:6443 --token ghr4s0.13nh5q6f6ywt2oso \ > --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859 W0420 10:24:14.829097 9202 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set. [preflight] Running pre-flight checks [preflight] Reading configuration from the cluster... [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' [kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" [kubelet-start] Starting the kubelet [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap... This node has joined the cluster: * Certificate signing request was sent to apiserver and a response was received. * The Kubelet was informed of the new secure connection details. Run 'kubectl get nodes' on the control-plane to see this node join the cluster. [root@k8s03 ~]#
Install the flannel network
[root@k8s01 ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml podsecuritypolicy.policy/psp.flannel.unprivileged created clusterrole.rbac.authorization.k8s.io/flannel created clusterrolebinding.rbac.authorization.k8s.io/flannel created serviceaccount/flannel created configmap/kube-flannel-cfg created daemonset.apps/kube-flannel-ds-amd64 created daemonset.apps/kube-flannel-ds-arm64 created daemonset.apps/kube-flannel-ds-arm created daemonset.apps/kube-flannel-ds-ppc64le created daemonset.apps/kube-flannel-ds-s390x created [root@k8s01 ~]#
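The flannel and CoreDNS Pods take a moment to start; it helps to watch them until everything is Running before joining workers or deploying workloads:

kubectl -n kube-system get pods -o wide -w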
Get node information
[root@k8s01 ~]# kubectl get nodes NAME STATUS ROLES AGE VERSION k8s01 Ready master 12m v1.18.2 k8s02 Ready <none> 10m v1.18.2 k8s03 Ready <none> 10m v1.18.2 [root@k8s01 ~]#
Check the status of the cluster components
[root@k8s01 ~]# kubectl get cs NAME STATUS MESSAGE ERROR scheduler Healthy ok controller-manager Healthy ok etcd-0 Healthy {"health":"true"} [root@k8s01 ~]#
List the local images
[root@k8s01 ~]# docker image ls REPOSITORY TAG IMAGE ID CREATED SIZE k8s.gcr.io/kube-proxy v1.18.2 0d40868643c6 3 days ago 117MB k8s.gcr.io/kube-scheduler v1.18.2 a3099161e137 3 days ago 95.3MB k8s.gcr.io/kube-apiserver v1.18.2 6ed75ad404bd 3 days ago 173MB k8s.gcr.io/kube-controller-manager v1.18.2 ace0a8c17ba9 3 days ago 162MB quay.io/coreos/flannel v0.12.0-amd64 4e9f801d2217 5 weeks ago 52.8MB k8s.gcr.io/pause 3.2 80d28bedfe5d 2 months ago 683kB k8s.gcr.io/coredns 1.6.7 67da37a9a360 2 months ago 43.8MB k8s.gcr.io/etcd 3.4.3-0 303ce5db0e90 5 months ago 288MB [root@k8s01 ~]#
Static Pod manifests for the main control-plane components
[root@k8s-01 ~]# ls /etc/kubernetes/manifests/ etcd.yaml kube-apiserver.yaml kube-controller-manager.yaml kube-scheduler.yaml [root@k8s-01 ~]#
etcd.yaml
[root@k8s-01 ~]# cat /etc/kubernetes/manifests/etcd.yaml apiVersion: v1 kind: Pod metadata: creationTimestamp: null labels: component: etcd tier: control-plane name: etcd namespace: kube-system spec: containers: - command: - etcd - --advertise-client-urls=https://172.31.43.3:2379 - --cert-file=/etc/kubernetes/pki/etcd/server.crt - --client-cert-auth=true - --data-dir=/var/lib/etcd - --initial-advertise-peer-urls=https://172.31.43.3:2380 - --initial-cluster=k8s-01=https://172.31.43.3:2380 - --key-file=/etc/kubernetes/pki/etcd/server.key - --listen-client-urls=https://127.0.0.1:2379,https://172.31.43.3:2379 - --listen-metrics-urls=http://127.0.0.1:2381 - --listen-peer-urls=https://172.31.43.3:2380 - --name=k8s-01 - --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt - --peer-client-cert-auth=true - --peer-key-file=/etc/kubernetes/pki/etcd/peer.key - --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt - --snapshot-count=10000 - --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt image: k8s.gcr.io/etcd:3.4.3-0 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 8 httpGet: host: 127.0.0.1 path: /health port: 2381 scheme: HTTP initialDelaySeconds: 15 timeoutSeconds: 15 name: etcd resources: {} volumeMounts: - mountPath: /var/lib/etcd name: etcd-data - mountPath: /etc/kubernetes/pki/etcd name: etcd-certs hostNetwork: true priorityClassName: system-cluster-critical volumes: - hostPath: path: /etc/kubernetes/pki/etcd type: DirectoryOrCreate name: etcd-certs - hostPath: path: /var/lib/etcd type: DirectoryOrCreate name: etcd-data status: {} [root@k8s-01 ~]#
kube-apiserver.yaml
[root@k8s-01 ~]# cat /etc/kubernetes/manifests/kube-apiserver.yaml apiVersion: v1 kind: Pod metadata: creationTimestamp: null labels: component: kube-apiserver tier: control-plane name: kube-apiserver namespace: kube-system spec: containers: - command: - kube-apiserver - --advertise-address=172.31.43.3 - --allow-privileged=true - --authorization-mode=Node,RBAC - --client-ca-file=/etc/kubernetes/pki/ca.crt - --enable-admission-plugins=NodeRestriction - --enable-bootstrap-token-auth=true - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key - --etcd-servers=https://127.0.0.1:2379 - --insecure-port=0 - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key - --requestheader-allowed-names=front-proxy-client - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt - --requestheader-extra-headers-prefix=X-Remote-Extra- - --requestheader-group-headers=X-Remote-Group - --requestheader-username-headers=X-Remote-User - --secure-port=6443 - --service-account-key-file=/etc/kubernetes/pki/sa.pub - --service-cluster-ip-range=10.96.0.0/12 - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key image: k8s.gcr.io/kube-apiserver:v1.17.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 8 httpGet: host: 172.31.43.3 path: /healthz port: 6443 scheme: HTTPS initialDelaySeconds: 15 timeoutSeconds: 15 name: kube-apiserver resources: requests: cpu: 250m volumeMounts: - mountPath: /etc/ssl/certs name: ca-certs readOnly: true - mountPath: /etc/pki name: etc-pki readOnly: true - mountPath: /etc/kubernetes/pki name: k8s-certs readOnly: true hostNetwork: true priorityClassName: system-cluster-critical volumes: - hostPath: path: /etc/ssl/certs type: DirectoryOrCreate name: ca-certs - hostPath: path: /etc/pki type: DirectoryOrCreate name: etc-pki - hostPath: path: /etc/kubernetes/pki type: DirectoryOrCreate name: k8s-certs status: {} [root@k8s-01 ~]#
kube-controller-manager.yaml
[root@k8s-01 ~]# cat /etc/kubernetes/manifests/kube-controller-manager.yaml apiVersion: v1 kind: Pod metadata: creationTimestamp: null labels: component: kube-controller-manager tier: control-plane name: kube-controller-manager namespace: kube-system spec: containers: - command: - kube-controller-manager - --allocate-node-cidrs=true - --authentication-kubeconfig=/etc/kubernetes/controller-manager.conf - --authorization-kubeconfig=/etc/kubernetes/controller-manager.conf - --bind-address=127.0.0.1 - --client-ca-file=/etc/kubernetes/pki/ca.crt - --cluster-cidr=10.244.0.0/16 - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key - --controllers=*,bootstrapsigner,tokencleaner - --kubeconfig=/etc/kubernetes/controller-manager.conf - --leader-elect=true - --node-cidr-mask-size=24 - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt - --root-ca-file=/etc/kubernetes/pki/ca.crt - --service-account-private-key-file=/etc/kubernetes/pki/sa.key - --service-cluster-ip-range=10.96.0.0/12 - --use-service-account-credentials=true image: k8s.gcr.io/kube-controller-manager:v1.17.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 8 httpGet: host: 127.0.0.1 path: /healthz port: 10257 scheme: HTTPS initialDelaySeconds: 15 timeoutSeconds: 15 name: kube-controller-manager resources: requests: cpu: 200m volumeMounts: - mountPath: /etc/ssl/certs name: ca-certs readOnly: true - mountPath: /etc/pki name: etc-pki readOnly: true - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec name: flexvolume-dir - mountPath: /etc/kubernetes/pki name: k8s-certs readOnly: true - mountPath: /etc/kubernetes/controller-manager.conf name: kubeconfig readOnly: true hostNetwork: true priorityClassName: system-cluster-critical volumes: - hostPath: path: /etc/ssl/certs type: DirectoryOrCreate name: ca-certs - hostPath: path: /etc/pki type: DirectoryOrCreate name: etc-pki - hostPath: path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec type: DirectoryOrCreate name: flexvolume-dir - hostPath: path: /etc/kubernetes/pki type: DirectoryOrCreate name: k8s-certs - hostPath: path: /etc/kubernetes/controller-manager.conf type: FileOrCreate name: kubeconfig status: {} [root@k8s-01 ~]#
kube-scheduler.yaml
[root@k8s-01 ~]# cat /etc/kubernetes/manifests/kube-scheduler.yaml apiVersion: v1 kind: Pod metadata: creationTimestamp: null labels: component: kube-scheduler tier: control-plane name: kube-scheduler namespace: kube-system spec: containers: - command: - kube-scheduler - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf - --bind-address=127.0.0.1 - --kubeconfig=/etc/kubernetes/scheduler.conf - --leader-elect=true image: k8s.gcr.io/kube-scheduler:v1.17.3 imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 8 httpGet: host: 127.0.0.1 path: /healthz port: 10259 scheme: HTTPS initialDelaySeconds: 15 timeoutSeconds: 15 name: kube-scheduler resources: requests: cpu: 100m volumeMounts: - mountPath: /etc/kubernetes/scheduler.conf name: kubeconfig readOnly: true hostNetwork: true priorityClassName: system-cluster-critical volumes: - hostPath: path: /etc/kubernetes/scheduler.conf type: FileOrCreate name: kubeconfig status: {} [root@k8s-01 ~]#
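These four files are static Pod manifests: the kubelet on the master watches this directory directly and recreates the corresponding Pod whenever a file changes, without going through the API server. A quick way to confirm the watched path, assuming the default kubeadm layout:

grep staticPodPath /var/lib/kubelet/config.yaml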
[root@k8s-01 ~]# cat cluster_initialized.txt [init] Using Kubernetes version: v1.17.3 [preflight] Running pre-flight checks [preflight] Pulling images required for setting up a Kubernetes cluster [preflight] This might take a minute or two, depending on the speed of your internet connection [preflight] You can also perform this action in beforehand using 'kubeadm config images pull' [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" [kubelet-start] Starting the kubelet [certs] Using certificateDir folder "/etc/kubernetes/pki" [certs] Generating "ca" certificate and key [certs] Generating "apiserver" certificate and key [certs] apiserver serving cert is signed for DNS names [k8s-01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.31.43.3] [certs] Generating "apiserver-kubelet-client" certificate and key [certs] Generating "front-proxy-ca" certificate and key [certs] Generating "front-proxy-client" certificate and key [certs] Generating "etcd/ca" certificate and key [certs] Generating "etcd/server" certificate and key [certs] etcd/server serving cert is signed for DNS names [k8s-01 localhost] and IPs [172.31.43.3 127.0.0.1 ::1] [certs] Generating "etcd/peer" certificate and key [certs] etcd/peer serving cert is signed for DNS names [k8s-01 localhost] and IPs [172.31.43.3 127.0.0.1 ::1] [certs] Generating "etcd/healthcheck-client" certificate and key [certs] Generating "apiserver-etcd-client" certificate and key [certs] Generating "sa" key and public key [kubeconfig] Using kubeconfig folder "/etc/kubernetes" [kubeconfig] Writing "admin.conf" kubeconfig file [kubeconfig] Writing "kubelet.conf" kubeconfig file [kubeconfig] Writing "controller-manager.conf" kubeconfig file [kubeconfig] Writing "scheduler.conf" kubeconfig file [control-plane] Using manifest folder "/etc/kubernetes/manifests" [control-plane] Creating static Pod manifest for "kube-apiserver" [control-plane] Creating static Pod manifest for "kube-controller-manager" [control-plane] Creating static Pod manifest for "kube-scheduler" [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s [apiclient] All control plane components are healthy after 15.004178 seconds [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace [kubelet] Creating a ConfigMap "kubelet-config-1.17" in namespace kube-system with the configuration for the kubelets in the cluster [upload-certs] Skipping phase. 
Please see --upload-certs [mark-control-plane] Marking the node k8s-01 as control-plane by adding the label "node-role.kubernetes.io/master=''" [mark-control-plane] Marking the node k8s-01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] [bootstrap-token] Using token: 1jhsop.wiy4qe0tfqye80lp [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key [addons] Applied essential addon: CoreDNS [addons] Applied essential addon: kube-proxy Your Kubernetes control-plane has initialized successfully! To start using your cluster, you need to run the following as a regular user: mkdir -p $HOME/.kube sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config sudo chown $(id -u):$(id -g) $HOME/.kube/config You should now deploy a pod network to the cluster. Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: https://kubernetes.io/docs/concepts/cluster-administration/addons/ Then you can join any number of worker nodes by running the following on each as root: kubeadm join 172.31.43.3:6443 --token 1jhsop.wiy4qe0tfqye80lp \ --discovery-token-ca-cert-hash sha256:63cf674da8de45ad1482fa70fb685734e9931819021f62f5ae7a078bba601bfc
Host list
Ansible   18.163.102.197/172.31.34.153
k8s-01    18.163.35.70/172.31.43.3
k8s-02    18.162.148.167/172.31.37.84
k8s-03    18.163.103.104/172.31.37.22
Amazon EC2 hosts disable root login and password authentication by default; enable both as follows
sudo sed -i 's/^\#PermitRootLogin yes/PermitRootLogin yes/' /etc/ssh/sshd_config
sudo sed -i 's/^PasswordAuthentication no/PasswordAuthentication yes/' /etc/ssh/sshd_config
sudo systemctl restart sshd
Check the Ansible version on the local control host
[root@ip-172-31-34-153 ~]# ansible --version ansible 2.9.5 config file = /etc/ansible/ansible.cfg configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python2.7/site-packages/ansible executable location = /bin/ansible python version = 2.7.5 (default, Oct 30 2018, 23:45:53) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)] [root@ip-172-31-34-153 ~]#
Disable strict host key checking on the control host
[root@ip-172-31-34-153 ~]# vi /etc/ssh/ssh_config
StrictHostKeyChecking no
Generate a key pair and distribute the public key to the remote hosts
[root@ip-172-31-34-153 ~]# ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa Generating public/private rsa key pair. Your identification has been saved in /root/.ssh/id_rsa. Your public key has been saved in /root/.ssh/id_rsa.pub. The key fingerprint is: SHA256:Gj5nl42xywRn0/s9hjBeACErGJWjQhfoDuEDT2yjYfE root@ip-172-31-34-153.ap-east-1.compute.internal The key's randomart image is: +---[RSA 2048]----+ | oooo... .. | |++*.oo o. | |*B.E.... . | |o=.. . o | |o o . S = o | | . . o + X o | | + o B * . | | + + o o + | | o o o| +----[SHA256]-----+ [root@ip-172-31-34-153 ~]# cat .ssh/id_rsa.pub ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC29DSROHgwWlucHoL/B+S/4Rd1KsVEbYLmM4p0+Ptx4NjGooEhrnNjIhpKmPNI5zvGtganSia2A7Vsp5Y+IVOgThRjzptQQzmbEloIqv6SsJRDyrUQIPV9dv3jv5pvbtAN0D5rh1AATPh0FNBtnkvm6HLowjueKdE6pBiq74NTPc5jfDuvwq2S5s4Ztnw9NsTuIlIiC7STCfuDo7NoxRVl+QumD12tW52CPd4ZjA4vg4v7xr/BF/rRxdFuG6+740s2kO1EZNaUOoi99qMLQiScOK+SLw+/tN66EmZC0uMeYlDiZZ1VsLb2MMd11CJDWSZ9SZbd1dHQbXywUbj0tRQF root@ip-172-31-34-153.ap-east-1.compute.internal [root@ip-172-31-34-153 ~]#
Distribute the public key
[root@ip-172-31-34-153 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@18.163.35.70 /bin/ssh-copy-id: INFO: Source of key(s) to be installed: ".ssh/id_rsa.pub" /bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed /bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys root@18.163.35.70's password: Number of key(s) added: 1 Now try logging into the machine, with: "ssh 'root@18.163.35.70'" and check to make sure that only the key(s) you wanted were added. [root@ip-172-31-34-153 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@18.162.148.167 /bin/ssh-copy-id: INFO: Source of key(s) to be installed: ".ssh/id_rsa.pub" /bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed /bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys root@18.162.148.167's password: Number of key(s) added: 1 Now try logging into the machine, with: "ssh 'root@18.162.148.167'" and check to make sure that only the key(s) you wanted were added. [root@ip-172-31-34-153 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@18.163.103.104 /bin/ssh-copy-id: INFO: Source of key(s) to be installed: ".ssh/id_rsa.pub" /bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed /bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys root@18.163.103.104's password: Number of key(s) added: 1 Now try logging into the machine, with: "ssh 'root@18.163.103.104'" and check to make sure that only the key(s) you wanted were added. [root@ip-172-31-34-153 ~]#
Configure the Ansible inventory
[root@ip-172-31-34-153 ~]# mkdir kube-cluster
[root@ip-172-31-34-153 ~]# cd kube-cluster/
[root@ip-172-31-34-153 kube-cluster]# vi hosts
[masters]
master ansible_host=18.163.35.70 ansible_user=root

[workers]
worker1 ansible_host=18.162.148.167 ansible_user=root
worker2 ansible_host=18.163.103.104 ansible_user=root
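Before running any playbook, it is worth confirming that Ansible can reach every host in the inventory:

ansible -i hosts all -m ping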
Prepare the base-environment playbook (k8s-01/k8s-02/k8s-03)
[root@ip-172-31-34-153 kube-cluster]# vi kube-dependencies.yaml
- hosts: all
  become: yes
  tasks:
    - name: Install yum utils
      yum:
        name: yum-utils
        state: latest

    - name: Install device-mapper-persistent-data
      yum:
        name: device-mapper-persistent-data
        state: latest

    - name: Install lvm2
      yum:
        name: lvm2
        state: latest

    - name: Add Docker repo
      get_url:
        url: https://download.docker.com/linux/centos/docker-ce.repo
        dest: /etc/yum.repos.d/docer-ce.repo

    - name: install Docker
      yum:
        name: docker-ce
        state: latest
        update_cache: true

    - name: start Docker
      service:
        name: docker
        state: started
        enabled: yes

    - name: disable SELinux
      command: setenforce 0

    - name: disable SELinux on reboot
      selinux:
        state: disabled

    - name: ensure net.bridge.bridge-nf-call-ip6tables is set to 1
      sysctl:
        name: net.bridge.bridge-nf-call-ip6tables
        value: 1
        state: present

    - name: ensure net.bridge.bridge-nf-call-iptables is set to 1
      sysctl:
        name: net.bridge.bridge-nf-call-iptables
        value: 1
        state: present

    - name: add Kubernetes' YUM repository
      yum_repository:
        name: Kubernetes
        description: Kubernetes YUM repository
        baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
        gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
        gpgcheck: yes

    - name: install kubelet
      yum:
        name: kubelet-1.17.3
        state: present
        update_cache: true

    - name: install kubeadm
      yum:
        name: kubeadm-1.17.3
        state: present

    - name: start kubelet
      service:
        name: kubelet
        enabled: yes
        state: started

- hosts: master
  become: yes
  tasks:
    - name: install kubectl
      yum:
        name: kubectl-1.17.3
        state: present
        allow_downgrade: yes
Run it
[root@ip-172-31-34-153 kube-cluster]# ansible-playbook -i ./hosts kube-dependencies.yaml PLAY [all] ***************************************************************************************************** TASK [Gathering Facts] ***************************************************************************************** ok: [worker1] ok: [worker2] ok: [master] TASK [Install yum utils] *************************************************************************************** changed: [worker1] changed: [master] changed: [worker2] TASK [Install device-mapper-persistent-data] ******************************************************************* changed: [worker1] changed: [worker2] changed: [master] TASK [Install lvm2] ******************************************************************************************** changed: [worker2] changed: [worker1] changed: [master] TASK [Add Docker repo] ***************************************************************************************** changed: [worker2] changed: [worker1] changed: [master] TASK [install Docker] ****************************************************************************************** changed: [worker1] changed: [worker2] changed: [master] TASK [start Docker] ******************************************************************************************** changed: [worker1] changed: [master] changed: [worker2] TASK [disable SELinux] ***************************************************************************************** changed: [worker2] changed: [worker1] changed: [master] TASK [disable SELinux on reboot] ******************************************************************************* [WARNING]: SELinux state change will take effect next reboot changed: [worker2] changed: [worker1] changed: [master] TASK [ensure net.bridge.bridge-nf-call-ip6tables is set to 1] ************************************************** [WARNING]: The value 1 (type int) in a string field was converted to u'1' (type string). If this does not look like what you expect, quote the entire value to ensure it does not change. 
changed: [worker2] changed: [worker1] changed: [master] TASK [ensure net.bridge.bridge-nf-call-iptables is set to 1] *************************************************** changed: [worker1] changed: [worker2] changed: [master] TASK [add Kubernetes' YUM repository] ************************************************************************** changed: [worker2] changed: [worker1] changed: [master] TASK [install kubelet] ***************************************************************************************** changed: [worker1] changed: [worker2] changed: [master] TASK [install kubeadm] ***************************************************************************************** changed: [worker1] changed: [worker2] changed: [master] TASK [start kubelet] ******************************************************************************************* changed: [worker2] changed: [worker1] changed: [master] PLAY [master] ************************************************************************************************** TASK [Gathering Facts] ***************************************************************************************** ok: [master] TASK [install kubectl] ***************************************************************************************** ok: [master] PLAY RECAP ***************************************************************************************************** master : ok=17 changed=14 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 worker1 : ok=15 changed=14 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 worker2 : ok=15 changed=14 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 [root@ip-172-31-34-153 kube-cluster]#
Prepare the master node Playbook configuration file (k8s-01)
[root@ip-172-31-34-153 kube-cluster]# vi master.yaml
- hosts: master
  become: yes
  tasks:
    - name: initialize the cluster
      shell: kubeadm init --pod-network-cidr=10.244.0.0/16 >> cluster_initialized.txt
      args:
        chdir: $HOME
        creates: cluster_initialized.txt

    - name: create .kube directory
      become: yes
      become_user: centos
      file:
        path: $HOME/.kube
        state: directory
        mode: 0755

    - name: copy admin.conf to user's kube config
      copy:
        src: /etc/kubernetes/admin.conf
        dest: /home/centos/.kube/config
        remote_src: yes
        owner: centos

    - name: install Pod network
      become: yes
      become_user: centos
      shell: kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml >> pod_network_setup.txt
      args:
        chdir: $HOME
        creates: pod_network_setup.txt
Run the playbook
[root@ip-172-31-34-153 kube-cluster]# ansible-playbook -i ./hosts master.yaml PLAY [master] ************************************************************************************************** TASK [Gathering Facts] ***************************************************************************************** ok: [master] TASK [initialize the cluster] ********************************************************************************** ok: [master] TASK [create .kube directory] ********************************************************************************** [WARNING]: Module remote_tmp /home/centos/.ansible/tmp did not exist and was created with a mode of 0700, this may cause issues when running as another user. To avoid this, create the remote_tmp dir with the correct permissions manually changed: [master] TASK [copy admin.conf to user's kube config] ******************************************************************* changed: [master] TASK [install Pod network] ************************************************************************************* changed: [master] PLAY RECAP ***************************************************************************************************** master : ok=5 changed=3 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 [root@ip-172-31-34-153 kube-cluster]#
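The remote_tmp warning can be avoided by creating the directory with the expected permissions before any become_user tasks run. A hypothetical pre-task for master.yaml (path and ownership taken from the warning text above):

- name: pre-create the Ansible remote_tmp directory for the centos user
  file:
    path: /home/centos/.ansible/tmp
    state: directory
    owner: centos
    group: centos
    mode: '0700'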
Verify the Kubernetes cluster and master node status as the unprivileged user centos
[centos@k8s-01 ~]$ kubectl get nodes NAME STATUS ROLES AGE VERSION k8s-01 Ready master 155m v1.17.3 [centos@k8s-01 ~]$
Prepare the worker node Playbook configuration file (k8s-02/k8s-03)
[root@ip-172-31-34-153 kube-cluster]# vi workers.yaml
- hosts: master
  become: yes
  gather_facts: false
  tasks:
    - name: get join command
      shell: kubeadm token create --print-join-command
      register: join_command_raw

    - name: set join command
      set_fact:
        join_command: "{{ join_command_raw.stdout_lines[0] }}"

- hosts: workers
  become: yes
  tasks:
    - name: join cluster
      shell: "{{ hostvars['master'].join_command }} --ignore-preflight-errors all >> node_joined.txt"
      args:
        chdir: $HOME
        creates: node_joined.txt
Run the playbook
[root@ip-172-31-34-153 kube-cluster]# ansible-playbook -i hosts workers.yaml PLAY [master] ************************************************************************************************** TASK [get join command] **************************************************************************************** changed: [master] TASK [set join command] **************************************************************************************** ok: [master] PLAY [workers] ************************************************************************************************* TASK [Gathering Facts] ***************************************************************************************** ok: [worker2] ok: [worker1] TASK [join cluster] ******************************************************************************************** changed: [worker2] changed: [worker1] PLAY RECAP ***************************************************************************************************** master : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 worker1 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 worker2 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 [root@ip-172-31-34-153 kube-cluster]#
Verify the cluster status
[centos@k8s-01 ~]$ kubectl get nodes NAME STATUS ROLES AGE VERSION k8s-01 Ready master 159m v1.17.3 k8s-02 Ready <none> 41s v1.17.3 k8s-03 Ready <none> 41s v1.17.3 [centos@k8s-01 ~]$
Deploy a containerized application for testing
[centos@k8s-01 ~]$ kubectl create deployment nginx --image=nginx deployment.apps/nginx created [centos@k8s-01 ~]$ [centos@k8s-01 ~]$ kubectl expose deploy nginx --port 80 --target-port 80 --type NodePort service/nginx exposed [centos@k8s-01 ~]$ [centos@k8s-01 ~]$ kubectl get services NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 179m nginx NodePort 10.109.120.31 <none> 80:30596/TCP 15s [centos@k8s-01 ~]$
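The two imperative commands above can also be written as a declarative manifest and applied with kubectl apply -f. A minimal sketch (labels and replica count are assumptions matching kubectl's defaults; the NodePort number is left for the cluster to allocate):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
  - port: 80
    targetPort: 80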
Access via a web browser (any node's IP on the NodePort shown above, 30596)
Delete the deployed containerized application
[centos@k8s-01 ~]$ kubectl delete service nginx service "nginx" deleted [centos@k8s-01 ~]$ kubectl get service NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 3h4m [centos@k8s-01 ~]$ [centos@k8s-01 ~]$ kubectl delete deployment nginx deployment.apps "nginx" deleted [centos@k8s-01 ~]$ kubectl get deployments No resources found in default namespace. [centos@k8s-01 ~]$
Client command-line interface (kubectl) configuration file details (PKI-based server and client certificate verification)
apiVersion: v1 clusters: - cluster: certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJd01ETXdOekE1TVRNeU1Wb1hEVE13TURNd05UQTVNVE15TVZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHl4Ckx2M25ESzZHaDgxN1pjWmpqUVV5em13RlVvdzZhZDV1T1Jabzg2Q0tsNW52RnF3VjRYL2E4OGx2S1grc2xqWDkKSDZGR2Y2bm1uM2JMTnlXWWEreThGcllUMHBQR2x3aG5qWE1WSkJlUW9SS2NiK2hySERPZlNGZ0xsZjQ0TWR1VwpPd3Vmb2VTYnJpL3hoZ0ExMXhqbStmVGJNV3ZkNkZVM0h6ZW9WeEtsdVJNcmJVL0YySHFVN0R1ZEV6dUNQUWFsCk1OOUxiblZJcUtwREp5VzhmODY1V29MUHJlWjhMZkZqMVQvMXl2ZEk1dkJwTFBKc0NZUndLdndSTEhZajAzTHMKRVA5QlpuRkhNRDYwV3RuZXc4bkdaRjJkWTdIRHZRa1V2M2hoemtVMXRLa3BncWhvM2tCUytoUUNwUEpLMzZLMgplOG9aT2NrTDJsYjJzTmpBck84Q0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJTnFSMXYwUGVPKy9TR05OcXN6S2MzNHk5RGkKVjA5dFZoemRRNEV6aGtoM0ZOS0RRMDZ0VTNPcUw2dzREano2SnlwSW9IaGVsTXVxVmJtY0V5WE9WNzYwZ0hPRQpJaWJ0ZlJhcVdPMVc2RXE0NklQbjEwZkFWNzRwNjhVcWdQdjkra0RSb2grYWhobFJFTGJJdTJNcjAzNHBjcWduClZSK01lWGZ6Q0VvalF3dzd0ZVJGNnpMTCtQa0duNHI4L24rNFNGUjJIK2lDVCtiVzNxZWdCYi9MWWwyTmNMTHMKVDEvcnROZnFTaEIyV2dYbXZKUkl2YXRIWWtUdUZCN1IwZ0pkQUJJWXdkSGlpbVN4TkdDK05WRzIzL3BDdmRKUApFcjFPd2xuWFBMSStiOHpXNDNEanVjd0pPdTY0alZEVmduNUpJUDZqNjRuYnN2eC9VSkIvOUZNK0lVST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= server: https://172.31.43.3:6443 name: kubernetes contexts: - context: cluster: kubernetes user: kubernetes-admin name: kubernetes-admin@kubernetes current-context: kubernetes-admin@kubernetes kind: Config preferences: {} users: - name: kubernetes-admin user: client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJY0trbWVQMXNaZnd3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TURBek1EY3dPVEV6TWpGYUZ3MHlNVEF6TURjd09URXpNak5hTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXhTbFZmb1IyVTQ2UGdCbzAKR2kyL2NROFEzVldCcVpaNzRwM3cxZ1pDS2dzaUhya3RGWTdrTm52Y3hLNXVPRjZIN1YxS0JrYmRUNXZvVlZ2YQpFRlY3TU5RZUZ6RDEzWkFKK2dOVFN5RFUrY21qT2xnQW1xMktZeHdKbTNBNUdnNFRSbVpUN01mS3FxMVc4V2lxClZlWkY1cnViUkdpb3Z0WWR5L3BHUEs1b0dJaWtpd2w0QU9SMXFGRG80ejR3SmtyMEd5OUxSSzhNZ0RkeEhrSk0KQklrZ2QrbnFpODBGZUpLM2JzWTBjUG9LYk9QbEx4Vm9XQW5iUWEyNjVqYXBQbitNdEpKWkdRelFwYXhranE5RApvek1Pa3pnV0dQMFZKcC9CUXFINGI5NTFXaUFpNTMwbVlvVTVRUDJwaFR6amtUbG1PQlErd3hoZDNKaU9TdjUwCkVmdkdHUUlEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFIdSt5MjRxa2F0Y21rZkJYRUtrUXg1SUdvNm9Ud0FIcnRqdQo5SUw1MTZ4cVZPMlUvblAwM3hqbHBVdkFSR1dSU3czRjZsanNkUTM5VS9DMGQ2SVNGT0t4K2VkMFE5MVptYW03CnNib0liaXJSeDdVa3ErdThoS3dRK1Zad1Z0akdLUWYwclB2STFkb2drcHJldkF2Myt3OUdld3p5Y0NqemxIbE0KU09pdFdYYkdpdzBoWmk3a25lYmdMQVEvdkVVSlFrNFNVK21oMTJIaVNZY0R2WlJOZkJOUzNONnpPMnZXUGFrcwpFMVIvZ1BBTmlMMllTSXpnQVAwSyszTzJGVzc1SndLa3dXUlNEM1NIZWQxbTZIYlVGcTlBUEdWOXB1eHJTZXJoCkF0T2QzbTdIUnRCS3Q1L29ZaUNva1NBRjZIR1hJcCtEYTFBMFZQRkU0YlVkQjl5MUlHWT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeFNsVmZvUjJVNDZQZ0JvMEdpMi9jUThRM1ZXQnFaWjc0cDN3MWdaQ0tnc2lIcmt0CkZZN2tObnZjeEs1dU9GNkg3VjFLQmtiZFQ1dm9WVnZhRUZWN01OUWVGekQxM1pBSitnTlRTeURVK2Ntak9sZ0EKbXEyS1l4d0ptM0E1R2c0VFJtWlQ3TWZLcXExVzhXaXFWZVpGNXJ1YlJHaW92dFlkeS9wR1BLNW9HSWlraXdsNApBT1IxcUZEbzR6NHdKa3IwR3k5TFJLOE1nRGR4SGtKTUJJa2dkK25xaTgwRmVKSzNic1kwY1BvS2JPUGxMeFZvCldBbmJRYTI2NWphcFBuK010SkpaR1F6UXBheGtqcTlEb3pNT2t6Z1dHUDBWSnAvQlFxSDRiOTUxV2lBaTUzMG0KWW9VNVFQMnBoVHpqa1RsbU9CUSt3eGhkM0ppT1N2NTBFZnZHR1FJREFRQUJBb0lCQVFDQ2s1bDNyU3JncyszKwpIVnljYWVmOGJNbnlqSXJQVWtiQ0UzQkpqdU9MRE15UUpIdmpaenRsaWlyd1o4Vy90M3Uyaks1VjhlRG90SXp1CjIySlVwd2hya2xCTGM3V2lBNTlYNFpQc2tkWDdpTHQrRElKNTdxMVVibUUrZk5pVWxQWFhEalpPL3hNT2JyYkMKTTF0OGdJR1RDblVPblhJRTBiSHlRZEw2cFZkenh3Ri9EeFNNTy9zOGxLOEh3K0RzT0xxU3FPbHoyOUpuYk9CeAp1aEMzK3VMalc4Rmpsblh6K25JQWRaWFZoRkp0dG43a1dkak1jZXkyTGZCc1NZbGZlWlhZaTRGTE8xbmNPWGpuCkYwLzNhU2g0UmtPeXZvZDZRSEVxTmFnS0ZPOUZqd29hQzRmWkxLQjBrTG16UlZYa1BiR2lDRXB1N1ozSEw0c3UKaFRaYTNUekJBb0dCQU8zMXlBWDVtYTR2U3FlK2V5eEx2L201WEhtb2QweDhXNUFkcU51ZzNuRjdUSE4zMXppbQpmYVBwTjd4R2lwcXNwMVlGQzBheC9FZDNJYW1RcWVSRlRtTHgrRmttb3NNSThBbUV2U0EvL0JTVWVhYTUzeWtwCkt1NXEzNFBWWW5OSXZpcWpTM1ZITERGckw5MlUzNnVBTk9uMTJwZUw3ek1kOXVOT0srNlV3L20xQW9HQkFOUWIKd0g0RWRUbVAwS2V5V0hmYlBheFhxSVJqV2xLeFhHTU5JcnRVZWNLQ0NadWFTNnE5TFYxWk5KZkMyamN3TFRKMApDMVB2RkNjWjAwRUFScGlkS2lYL0ZaQzloRHZ6TkpsUnRseGs0aGVZVUVoa0lQL1RtcVUvTWZhSEhBREhlbDNCCkNPL1BuUnU5Y3g0NmwxZjBOcm5XRVJoa2J5TTJ4Mzc1ek5xb0tJbFZBb0dBUzhxKy9QZzFOTCtuWGFwVC9SWGIKZmFUR2laRlkvaW1WMkY4NkMwby96NUZnRmw4VFU5M2pvck9EcHhvb3gzODZoVEZ5R0FCVXhFWnptRmlWWkRtVwo3L2oyQ3g4OU5EWENqcVdTdjVUaHE0Um5BdTJzNEtWV0lUNDFGdjUrTHczNlZBWlM0SFhjNDVpcVZEODR4cDA5ClBVK3JZaDJXQUlnSXZQbUhFS1NkandrQ2dZQm53dHU3eWZwK21qZjhrV1p0MjdhajVJM3ZsWnJOOFMyODF1UXkKdC9TSWpveWNyakp0NS9XVlFOcFZrMkNrdHRDbGFkZFF6QmdUdUxKN2plTDdMWWM4NXpocGdneDZOMU4zM1YxVQpmWldNN1ZuNHorTEV3NE5YYXo3SjF2Wi8reFdGWDdVN2UxamtCUjJYb0JvQlVOcWt0bS9PZXZOVFNxejFGTVorCkFOMHpzUUtCZ1FDaDROSlEvVjhhc3prOURnZ2F5bnZ1Z2JWWVg1R0lFNGRSRng3Z3dXek5BckI0V1pUODVHeDgKSzByN3BLdTJsYmh2OFE1UU9GdFFhS0JwcCtjb1g2a3cvbTJZdWdYeVdiREpScEY4ODJXbkQzYWhvbW10WTlXZgpOWmJkeGRXNk8xZ1dURTg1ODV3YW5uOWFZR3g5Q21xNDJ4Sk9SaURPakFZWWEyR3phTHI2SHc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
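With the base64 blobs elided, the structure of the same kubeconfig is easier to see (a sketch; the values above are the real certificate data):

apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: <base64-encoded cluster CA certificate>
    server: https://172.31.43.3:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: <base64-encoded admin client certificate>
    client-key-data: <base64-encoded admin client private key>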
Basic commands of the kubectl command-line tool
[centos@k8s-01 ~]$ kubectl kubectl controls the Kubernetes cluster manager. Find more information at: https://kubernetes.io/docs/reference/kubectl/overview/ Basic Commands (Beginner): create Create a resource from a file or from stdin. expose Take a replication controller, service, deployment or pod and expose it as a new Kubernetes Service run Run a particular image on the cluster set Set specific features on objects Basic Commands (Intermediate): explain Documentation of resources get Display one or many resources edit Edit a resource on the server delete Delete resources by filenames, stdin, resources and names, or by resources and label selector Deploy Commands: rollout Manage the rollout of a resource scale Set a new size for a Deployment, ReplicaSet or Replication Controller autoscale Auto-scale a Deployment, ReplicaSet, or ReplicationController Cluster Management Commands: certificate Modify certificate resources. cluster-info Display cluster info top Display Resource (CPU/Memory/Storage) usage. cordon Mark node as unschedulable uncordon Mark node as schedulable drain Drain node in preparation for maintenance taint Update the taints on one or more nodes Troubleshooting and Debugging Commands: describe Show details of a specific resource or group of resources logs Print the logs for a container in a pod attach Attach to a running container exec Execute a command in a container port-forward Forward one or more local ports to a pod proxy Run a proxy to the Kubernetes API server cp Copy files and directories to and from containers. auth Inspect authorization Advanced Commands: diff Diff live version against would-be applied version apply Apply a configuration to a resource by filename or stdin patch Update field(s) of a resource using strategic merge patch replace Replace a resource by filename or stdin wait Experimental: Wait for a specific condition on one or many resources. convert Convert config files between different API versions kustomize Build a kustomization target from a directory or a remote url. Settings Commands: label Update the labels on a resource annotate Update the annotations on a resource completion Output shell completion code for the specified shell (bash or zsh) Other Commands: api-resources Print the supported API resources on the server api-versions Print the supported API versions on the server, in the form of "group/version" config Modify kubeconfig files plugin Provides utilities for interacting with plugins. version Print the client and server version information Usage: kubectl [flags] [options] Use "kubectl <command> --help" for more information about a given command. Use "kubectl options" for a list of global command-line options (applies to all commands). [centos@k8s-01 ~]$
References
https://www.digitalocean.com/community/tutorials/how-to-create-a-kubernetes-cluster-using-kubeadm-on-centos-7