April 21, 2020
 

Base environment installation script (for Amazon AWS EC2 CentOS 7)

#!/bin/bash
# Base environment setup for a Kubernetes node (CentOS 7 on AWS EC2)

# Disable SELinux (runtime setting and persistent config)
setenforce 0;
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config;
# Make bridged traffic visible to iptables (required for Kubernetes networking)
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system;
# Install Docker CE from the official repository
yum makecache;
yum install -y yum-utils;
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo;
yum install -y docker-ce docker-ce-cli containerd.io;
# Add the cluster hosts to /etc/hosts
cat <<EOF >> /etc/hosts
172.31.3.209 k8s01
172.31.8.132 k8s02
172.31.10.229 k8s03
EOF
# Configure the Docker daemon: systemd cgroup driver, json-file logging, overlay2 storage
mkdir -p /etc/docker;
cat <<EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
# Enable and start the Docker service
systemctl daemon-reload;
systemctl enable docker;
systemctl restart docker;
# Add the Kubernetes YUM repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
# Install kubeadm, kubelet and kubectl, and enable kubelet on boot
yum install -y kubectl kubelet kubeadm;
systemctl enable kubelet;

Run the script

[root@k8s01 ~]# vi deploy.sh
[root@k8s01 ~]# chmod 700 deploy.sh 
[root@k8s01 ~]# ./deploy.sh
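
A quick sanity check after the script finishes (not captured in the original log) is to confirm the Docker and kubelet packages are in place:

docker --version
kubelet --version
systemctl is-enabled docker kubelet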

Initialize the master node

kubeadm init --apiserver-advertise-address=172.31.14.12 --pod-network-cidr=10.244.0.0/16

[root@k8s01 ~]# kubeadm init --apiserver-advertise-address=172.31.14.12 --pod-network-cidr=10.244.0.0/16

Configure the local kubectl environment

[root@k8s01 ~]# mkdir -p $HOME/.kube
[root@k8s01 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s01 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
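
With the kubeconfig in place, a quick way to confirm kubectl can reach the new control plane (not part of the original log):

kubectl cluster-info
kubectl get nodes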

Join the worker nodes to the cluster

kubeadm join 172.31.14.12:6443 --token ghr4s0.13nh5q6f6ywt2oso \
--discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
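
The bootstrap token printed by kubeadm init is valid for 24 hours by default; if it has expired or was lost, a fresh join command can be generated on the master before joining the workers:

kubeadm token create --print-join-command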

Node 2 joins the cluster

[root@k8s02 ~]# kubeadm join 172.31.14.12:6443 --token ghr4s0.13nh5q6f6ywt2oso \
> --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
W0420 10:23:48.432125 9198 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@k8s02 ~]#

Node 3 joins the cluster

[root@k8s03 ~]# kubeadm join 172.31.14.12:6443 --token ghr4s0.13nh5q6f6ywt2oso \
> --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
W0420 10:24:14.829097 9202 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@k8s03 ~]#

Install the Flannel network

[root@k8s01 ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
[root@k8s01 ~]#
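
Before checking node status it can help to verify that the Flannel DaemonSet pods and CoreDNS come up; a minimal check (output omitted here) is to list all system pods and wait for the kube-flannel-ds-* and coredns pods to reach Running:

kubectl get pods --all-namespaces -o wide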

Get node information

[root@k8s01 ~]# kubectl get nodes
NAME    STATUS   ROLES    AGE   VERSION
k8s01   Ready    master   12m   v1.18.2
k8s02   Ready    <none>   10m   v1.18.2
k8s03   Ready    <none>   10m   v1.18.2
[root@k8s01 ~]#

Check cluster component status

[root@k8s01 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
[root@k8s01 ~]# 

List local Docker images

[root@k8s01 ~]# docker image ls
REPOSITORY                           TAG                 IMAGE ID            CREATED             SIZE
k8s.gcr.io/kube-proxy                v1.18.2             0d40868643c6        3 days ago          117MB
k8s.gcr.io/kube-scheduler            v1.18.2             a3099161e137        3 days ago          95.3MB
k8s.gcr.io/kube-apiserver            v1.18.2             6ed75ad404bd        3 days ago          173MB
k8s.gcr.io/kube-controller-manager   v1.18.2             ace0a8c17ba9        3 days ago          162MB
quay.io/coreos/flannel               v0.12.0-amd64       4e9f801d2217        5 weeks ago         52.8MB
k8s.gcr.io/pause                     3.2                 80d28bedfe5d        2 months ago        683kB
k8s.gcr.io/coredns                   1.6.7               67da37a9a360        2 months ago        43.8MB
k8s.gcr.io/etcd                      3.4.3-0             303ce5db0e90        5 months ago        288MB
[root@k8s01 ~]#

 

March 7, 2020
 

Control-plane component manifest files

[root@k8s-01 ~]# ls /etc/kubernetes/manifests/
etcd.yaml kube-apiserver.yaml kube-controller-manager.yaml kube-scheduler.yaml
[root@k8s-01 ~]#

etcd.yaml

[root@k8s-01 ~]# cat /etc/kubernetes/manifests/etcd.yaml 
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: etcd
    tier: control-plane
  name: etcd
  namespace: kube-system
spec:
  containers:
  - command:
    - etcd
    - --advertise-client-urls=https://172.31.43.3:2379
    - --cert-file=/etc/kubernetes/pki/etcd/server.crt
    - --client-cert-auth=true
    - --data-dir=/var/lib/etcd
    - --initial-advertise-peer-urls=https://172.31.43.3:2380
    - --initial-cluster=k8s-01=https://172.31.43.3:2380
    - --key-file=/etc/kubernetes/pki/etcd/server.key
    - --listen-client-urls=https://127.0.0.1:2379,https://172.31.43.3:2379
    - --listen-metrics-urls=http://127.0.0.1:2381
    - --listen-peer-urls=https://172.31.43.3:2380
    - --name=k8s-01
    - --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt
    - --peer-client-cert-auth=true
    - --peer-key-file=/etc/kubernetes/pki/etcd/peer.key
    - --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
    - --snapshot-count=10000
    - --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
    image: k8s.gcr.io/etcd:3.4.3-0
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /health
        port: 2381
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: etcd
    resources: {}
    volumeMounts:
    - mountPath: /var/lib/etcd
      name: etcd-data
    - mountPath: /etc/kubernetes/pki/etcd
      name: etcd-certs
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/kubernetes/pki/etcd
      type: DirectoryOrCreate
    name: etcd-certs
  - hostPath:
      path: /var/lib/etcd
      type: DirectoryOrCreate
    name: etcd-data
status: {}
[root@k8s-01 ~]#
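
Since the manifest exposes a plain-HTTP metrics/health listener on 127.0.0.1:2381 (the same endpoint the liveness probe uses), etcd health can be spot-checked directly on the master, for example:

curl http://127.0.0.1:2381/health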

kube-apiserver.yaml

[root@k8s-01 ~]# cat /etc/kubernetes/manifests/kube-apiserver.yaml 
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-apiserver
    tier: control-plane
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-apiserver
    - --advertise-address=172.31.43.3
    - --allow-privileged=true
    - --authorization-mode=Node,RBAC
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --enable-admission-plugins=NodeRestriction
    - --enable-bootstrap-token-auth=true
    - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
    - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
    - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
    - --etcd-servers=https://127.0.0.1:2379
    - --insecure-port=0
    - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
    - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
    - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
    - --requestheader-allowed-names=front-proxy-client
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --requestheader-extra-headers-prefix=X-Remote-Extra-
    - --requestheader-group-headers=X-Remote-Group
    - --requestheader-username-headers=X-Remote-User
    - --secure-port=6443
    - --service-account-key-file=/etc/kubernetes/pki/sa.pub
    - --service-cluster-ip-range=10.96.0.0/12
    - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
    - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
    image: k8s.gcr.io/kube-apiserver:v1.17.3
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 172.31.43.3
        path: /healthz
        port: 6443
        scheme: HTTPS
      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: kube-apiserver
    resources:
      requests:
        cpu: 250m
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ca-certs
      readOnly: true
    - mountPath: /etc/pki
      name: etc-pki
      readOnly: true
    - mountPath: /etc/kubernetes/pki
      name: k8s-certs
      readOnly: true
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/ssl/certs
      type: DirectoryOrCreate
    name: ca-certs
  - hostPath:
      path: /etc/pki
      type: DirectoryOrCreate
    name: etc-pki
  - hostPath:
      path: /etc/kubernetes/pki
      type: DirectoryOrCreate
    name: k8s-certs
status: {}
[root@k8s-01 ~]#
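
The liveness probe above queries /healthz on the advertise address over HTTPS; the same check can be run manually on the master using the cluster CA referenced in the manifest (a sketch, not from the original log):

curl --cacert /etc/kubernetes/pki/ca.crt https://172.31.43.3:6443/healthz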

kube-controller-manager.yaml

[root@k8s-01 ~]# cat /etc/kubernetes/manifests/kube-controller-manager.yaml 
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-controller-manager
    tier: control-plane
  name: kube-controller-manager
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-controller-manager
    - --allocate-node-cidrs=true
    - --authentication-kubeconfig=/etc/kubernetes/controller-manager.conf
    - --authorization-kubeconfig=/etc/kubernetes/controller-manager.conf
    - --bind-address=127.0.0.1
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --cluster-cidr=10.244.0.0/16
    - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
    - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
    - --controllers=*,bootstrapsigner,tokencleaner
    - --kubeconfig=/etc/kubernetes/controller-manager.conf
    - --leader-elect=true
    - --node-cidr-mask-size=24
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --root-ca-file=/etc/kubernetes/pki/ca.crt
    - --service-account-private-key-file=/etc/kubernetes/pki/sa.key
    - --service-cluster-ip-range=10.96.0.0/12
    - --use-service-account-credentials=true
    image: k8s.gcr.io/kube-controller-manager:v1.17.3
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10257
        scheme: HTTPS
      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: kube-controller-manager
    resources:
      requests:
        cpu: 200m
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ca-certs
      readOnly: true
    - mountPath: /etc/pki
      name: etc-pki
      readOnly: true
    - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
      name: flexvolume-dir
    - mountPath: /etc/kubernetes/pki
      name: k8s-certs
      readOnly: true
    - mountPath: /etc/kubernetes/controller-manager.conf
      name: kubeconfig
      readOnly: true
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/ssl/certs
      type: DirectoryOrCreate
    name: ca-certs
  - hostPath:
      path: /etc/pki
      type: DirectoryOrCreate
    name: etc-pki
  - hostPath:
      path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
      type: DirectoryOrCreate
    name: flexvolume-dir
  - hostPath:
      path: /etc/kubernetes/pki
      type: DirectoryOrCreate
    name: k8s-certs
  - hostPath:
      path: /etc/kubernetes/controller-manager.conf
      type: FileOrCreate
    name: kubeconfig
status: {}
[root@k8s-01 ~]#

kube-scheduler.yaml

[root@k8s-01 ~]# cat /etc/kubernetes/manifests/kube-scheduler.yaml 
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-scheduler
    tier: control-plane
  name: kube-scheduler
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-scheduler
    - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf
    - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf
    - --bind-address=127.0.0.1
    - --kubeconfig=/etc/kubernetes/scheduler.conf
    - --leader-elect=true
    image: k8s.gcr.io/kube-scheduler:v1.17.3
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10259
        scheme: HTTPS
      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: kube-scheduler
    resources:
      requests:
        cpu: 100m
    volumeMounts:
    - mountPath: /etc/kubernetes/scheduler.conf
      name: kubeconfig
      readOnly: true
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/kubernetes/scheduler.conf
      type: FileOrCreate
    name: kubeconfig
status: {}
[root@k8s-01 ~]#
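
Both the controller-manager and the scheduler serve the same /healthz path their liveness probes use, on localhost with self-signed certificates, so an optional manual spot check on the master looks like:

curl -k https://127.0.0.1:10257/healthz
curl -k https://127.0.0.1:10259/healthz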
March 7, 2020
 
[root@k8s-01 ~]# cat cluster_initialized.txt
[init] Using Kubernetes version: v1.17.3
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.31.43.3]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-01 localhost] and IPs [172.31.43.3 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-01 localhost] and IPs [172.31.43.3 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 15.004178 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.17" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-01 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 1jhsop.wiy4qe0tfqye80lp
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.31.43.3:6443 --token 1jhsop.wiy4qe0tfqye80lp \
    --discovery-token-ca-cert-hash sha256:63cf674da8de45ad1482fa70fb685734e9931819021f62f5ae7a078bba601bfc
March 7, 2020
 

Host list

Ansible 18.163.102.197/172.31.34.153
k8s-01 18.163.35.70/172.31.43.3
k8s-02 18.162.148.167/172.31.37.84
k8s-03 18.163.103.104/172.31.37.22

Enable root login and password authentication on the Amazon EC2 hosts (both are disabled by default)

sudo sed -i 's/^\#PermitRootLogin yes/PermitRootLogin yes/' /etc/ssh/sshd_config
sudo sed -i 's/^PasswordAuthentication no/PasswordAuthentication yes/' /etc/ssh/sshd_config
sudo systemctl restart sshd
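
An optional extra step (not in the original procedure) is to validate the edited sshd configuration before the restart, so a typo cannot lock out SSH access:

sudo sshd -t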

Check the Ansible version on the local host

[root@ip-172-31-34-153 ~]# ansible --version
ansible 2.9.5
  config file = /etc/ansible/ansible.cfg
  configured module search path = [u'/root/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
  ansible python module location = /usr/lib/python2.7/site-packages/ansible
  executable location = /bin/ansible
  python version = 2.7.5 (default, Oct 30 2018, 23:45:53) [GCC 4.8.5 20150623 (Red Hat 4.8.5-36)]
[root@ip-172-31-34-153 ~]#

Disable strict host key checking on the local host

[root@ip-172-31-34-153 ~]# vi /etc/ssh/ssh_config
StrictHostKeyChecking no

Generate a key pair and distribute the public key to the remote hosts

[root@ip-172-31-34-153 ~]# ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
Generating public/private rsa key pair.
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:Gj5nl42xywRn0/s9hjBeACErGJWjQhfoDuEDT2yjYfE root@ip-172-31-34-153.ap-east-1.compute.internal
The key's randomart image is:
+---[RSA 2048]----+
| oooo... ..      |
|++*.oo  o.       |
|*B.E....  .      |
|o=..  .    o     |
|o o   . S = o    |
| .   . o + X o   |
|      + o B * .  |
|       + + o o + |
|          o   o o|
+----[SHA256]-----+
[root@ip-172-31-34-153 ~]# cat .ssh/id_rsa.pub 
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC29DSROHgwWlucHoL/B+S/4Rd1KsVEbYLmM4p0+Ptx4NjGooEhrnNjIhpKmPNI5zvGtganSia2A7Vsp5Y+IVOgThRjzptQQzmbEloIqv6SsJRDyrUQIPV9dv3jv5pvbtAN0D5rh1AATPh0FNBtnkvm6HLowjueKdE6pBiq74NTPc5jfDuvwq2S5s4Ztnw9NsTuIlIiC7STCfuDo7NoxRVl+QumD12tW52CPd4ZjA4vg4v7xr/BF/rRxdFuG6+740s2kO1EZNaUOoi99qMLQiScOK+SLw+/tN66EmZC0uMeYlDiZZ1VsLb2MMd11CJDWSZ9SZbd1dHQbXywUbj0tRQF root@ip-172-31-34-153.ap-east-1.compute.internal
[root@ip-172-31-34-153 ~]#

Distribute the public key

[root@ip-172-31-34-153 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@18.163.35.70
/bin/ssh-copy-id: INFO: Source of key(s) to be installed: ".ssh/id_rsa.pub"
/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@18.163.35.70's password:

Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'root@18.163.35.70'"
and check to make sure that only the key(s) you wanted were added.

[root@ip-172-31-34-153 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@18.162.148.167
/bin/ssh-copy-id: INFO: Source of key(s) to be installed: ".ssh/id_rsa.pub"
/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@18.162.148.167's password:

Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'root@18.162.148.167'"
and check to make sure that only the key(s) you wanted were added.

[root@ip-172-31-34-153 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@18.163.103.104
/bin/ssh-copy-id: INFO: Source of key(s) to be installed: ".ssh/id_rsa.pub"
/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@18.163.103.104's password:

Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'root@18.163.103.104'"
and check to make sure that only the key(s) you wanted were added.

[root@ip-172-31-34-153 ~]#

Configure the Ansible inventory

[root@ip-172-31-34-153 ~]# mkdir kube-cluster
[root@ip-172-31-34-153 ~]# cd kube-cluster/
[root@ip-172-31-34-153 kube-cluster]# vi hosts
[masters]
master ansible_host=18.163.35.70 ansible_user=root

[workers]
worker1 ansible_host=18.162.148.167 ansible_user=root
worker2 ansible_host=18.163.103.104 ansible_user=root
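
Before running any playbooks, connectivity to all inventory hosts can be verified with the ping module (a quick check, not shown in the original log):

ansible -i hosts all -m ping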

Prepare the base environment playbook (k8s-01/k8s-02/k8s-03)

[root@ip-172-31-34-153 kube-cluster]# vi kube-dependencies.yaml
- hosts: all
  become: yes
  tasks:
    - name: Install yum utils
      yum:
        name: yum-utils
        state: latest

    - name: Install device-mapper-persistent-data
      yum:
        name: device-mapper-persistent-data
        state: latest

    - name: Install lvm2
      yum:
        name: lvm2
        state: latest

    - name: Add Docker repo
      get_url:
        url: https://download.docker.com/linux/centos/docker-ce.repo
        dest: /etc/yum.repos.d/docker-ce.repo

    - name: install Docker
      yum:
        name: docker-ce
        state: latest
        update_cache: true

    - name: start Docker
      service:
        name: docker
        state: started
        enabled: yes

    - name: disable SELinux
      command: setenforce 0

    - name: disable SELinux on reboot
      selinux:
        state: disabled

    - name: ensure net.bridge.bridge-nf-call-ip6tables is set to 1
      sysctl:
        name: net.bridge.bridge-nf-call-ip6tables
        value: 1
        state: present

    - name: ensure net.bridge.bridge-nf-call-iptables is set to 1
      sysctl:
        name: net.bridge.bridge-nf-call-iptables
        value: 1
        state: present

    - name: add Kubernetes' YUM repository
      yum_repository:
        name: Kubernetes
        description: Kubernetes YUM repository
        baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
        gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
        gpgcheck: yes

    - name: install kubelet
      yum:
        name: kubelet-1.17.3
        state: present
        update_cache: true

    - name: install kubeadm
      yum:
        name: kubeadm-1.17.3
        state: present

    - name: start kubelet
      service:
        name: kubelet
        enabled: yes
        state: started

- hosts: master
  become: yes
  tasks:
    - name: install kubectl
      yum:
        name: kubectl-1.17.3
        state: present
        allow_downgrade: yes
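
Before executing, the playbook can be checked for syntax errors (an optional step, not in the original log):

ansible-playbook -i ./hosts kube-dependencies.yaml --syntax-check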

Run the playbook

[root@ip-172-31-34-153 kube-cluster]# ansible-playbook -i ./hosts kube-dependencies.yaml

PLAY [all] *****************************************************************************************************

TASK [Gathering Facts] *****************************************************************************************
ok: [worker1]
ok: [worker2]
ok: [master]

TASK [Install yum utils] ***************************************************************************************
changed: [worker1]
changed: [master]
changed: [worker2]

TASK [Install device-mapper-persistent-data] *******************************************************************
changed: [worker1]
changed: [worker2]
changed: [master]

TASK [Install lvm2] ********************************************************************************************
changed: [worker2]
changed: [worker1]
changed: [master]

TASK [Add Docker repo] *****************************************************************************************
changed: [worker2]
changed: [worker1]
changed: [master]

TASK [install Docker] ******************************************************************************************
changed: [worker1]
changed: [worker2]
changed: [master]

TASK [start Docker] ********************************************************************************************
changed: [worker1]
changed: [master]
changed: [worker2]

TASK [disable SELinux] *****************************************************************************************
changed: [worker2]
changed: [worker1]
changed: [master]

TASK [disable SELinux on reboot] *******************************************************************************
[WARNING]: SELinux state change will take effect next reboot
changed: [worker2]
changed: [worker1]
changed: [master]

TASK [ensure net.bridge.bridge-nf-call-ip6tables is set to 1] **************************************************
[WARNING]: The value 1 (type int) in a string field was converted to u'1' (type string). If this does not look
like what you expect, quote the entire value to ensure it does not change.
changed: [worker2]
changed: [worker1]
changed: [master]

TASK [ensure net.bridge.bridge-nf-call-iptables is set to 1] ***************************************************
changed: [worker1]
changed: [worker2]
changed: [master]

TASK [add Kubernetes' YUM repository] **************************************************************************
changed: [worker2]
changed: [worker1]
changed: [master]

TASK [install kubelet] *****************************************************************************************
changed: [worker1]
changed: [worker2]
changed: [master]

TASK [install kubeadm] *****************************************************************************************
changed: [worker1]
changed: [worker2]
changed: [master]

TASK [start kubelet] *******************************************************************************************
changed: [worker2]
changed: [worker1]
changed: [master]

PLAY [master] **************************************************************************************************

TASK [Gathering Facts] *****************************************************************************************
ok: [master]

TASK [install kubectl] *****************************************************************************************
ok: [master]

PLAY RECAP *****************************************************************************************************
master : ok=17 changed=14 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 
worker1 : ok=15 changed=14 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 
worker2 : ok=15 changed=14 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0

[root@ip-172-31-34-153 kube-cluster]#

Prepare the master node playbook (k8s-01)

[root@ip-172-31-34-153 kube-cluster]# vi master.yaml
- hosts: master
  become: yes
  tasks:
    - name: initialize the cluster
      shell: kubeadm init --pod-network-cidr=10.244.0.0/16 >> cluster_initialized.txt
      args:
        chdir: $HOME
        creates: cluster_initialized.txt

    - name: create .kube directory
      become: yes
      become_user: centos
      file:
        path: $HOME/.kube
        state: directory
        mode: 0755

    - name: copy admin.conf to user's kube config
      copy:
        src: /etc/kubernetes/admin.conf
        dest: /home/centos/.kube/config
        remote_src: yes
        owner: centos

    - name: install Pod network
      become: yes
      become_user: centos
      shell: kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml >> pod_network_setup.txt
      args:
        chdir: $HOME
        creates: pod_network_setup.txt

Run the playbook

[root@ip-172-31-34-153 kube-cluster]# ansible-playbook -i ./hosts master.yaml

PLAY [master] **************************************************************************************************

TASK [Gathering Facts] *****************************************************************************************
ok: [master]

TASK [initialize the cluster] **********************************************************************************
ok: [master]

TASK [create .kube directory] **********************************************************************************
[WARNING]: Module remote_tmp /home/centos/.ansible/tmp did not exist and was created with a mode of 0700, this
may cause issues when running as another user. To avoid this, create the remote_tmp dir with the correct
permissions manually
changed: [master]

TASK [copy admin.conf to user's kube config] *******************************************************************
changed: [master]

TASK [install Pod network] *************************************************************************************
changed: [master]

PLAY RECAP *****************************************************************************************************
master : ok=5 changed=3 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0

[root@ip-172-31-34-153 kube-cluster]#

Verify the Kubernetes cluster and master node status as the unprivileged user centos

[centos@k8s-01 ~]$ kubectl get nodes
NAME     STATUS   ROLES    AGE    VERSION
k8s-01   Ready    master   155m   v1.17.3
[centos@k8s-01 ~]$

Prepare the worker node playbook (k8s-02/k8s-03)

[root@ip-172-31-34-153 kube-cluster]# vi workers.yaml
- hosts: master
  become: yes
  gather_facts: false
  tasks:
    - name: get join command
      shell: kubeadm token create --print-join-command
      register: join_command_raw

    - name: set join command
      set_fact:
        join_command: "{{ join_command_raw.stdout_lines[0] }}"


- hosts: workers
  become: yes
  tasks:
    - name: join cluster
      shell: "{{ hostvars['master'].join_command }} --ignore-preflight-errors all  >> node_joined.txt"
      args:
        chdir: $HOME
        creates: node_joined.txt

Run the playbook

[root@ip-172-31-34-153 kube-cluster]# ansible-playbook -i hosts workers.yaml

PLAY [master] **************************************************************************************************

TASK [get join command] ****************************************************************************************
changed: [master]

TASK [set join command] ****************************************************************************************
ok: [master]

PLAY [workers] *************************************************************************************************

TASK [Gathering Facts] *****************************************************************************************
ok: [worker2]
ok: [worker1]

TASK [join cluster] ********************************************************************************************
changed: [worker2]
changed: [worker1]

PLAY RECAP *****************************************************************************************************
master : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 
worker1 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 
worker2 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0

[root@ip-172-31-34-153 kube-cluster]#

Verify the cluster status

[centos@k8s-01 ~]$ kubectl get nodes
NAME     STATUS   ROLES    AGE    VERSION
k8s-01   Ready    master   159m   v1.17.3
k8s-02   Ready    <none>   41s    v1.17.3
k8s-03   Ready    <none>   41s    v1.17.3
[centos@k8s-01 ~]$

Deploy a containerized application for testing

[centos@k8s-01 ~]$ kubectl create deployment nginx --image=nginx
deployment.apps/nginx created
[centos@k8s-01 ~]$ 

[centos@k8s-01 ~]$ kubectl expose deploy nginx --port 80 --target-port 80 --type NodePort
service/nginx exposed
[centos@k8s-01 ~]$ 

[centos@k8s-01 ~]$ kubectl get services
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        179m
nginx        NodePort    10.109.120.31   <none>        80:30596/TCP   15s
[centos@k8s-01 ~]$

Access via a browser
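
The nginx welcome page can be opened in a browser on any node's public IP at the NodePort shown above (30596). The same check from the command line, using k8s-01's public address from the host list and assuming the EC2 security group allows that port:

curl http://18.163.35.70:30596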

Delete the deployed containerized application

[centos@k8s-01 ~]$ kubectl delete service nginx
service "nginx" deleted
[centos@k8s-01 ~]$ kubectl get service
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   3h4m
[centos@k8s-01 ~]$ 

[centos@k8s-01 ~]$ kubectl delete deployment nginx
deployment.apps "nginx" deleted
[centos@k8s-01 ~]$ kubectl get deployments
No resources found in default namespace.
[centos@k8s-01 ~]$

kubectl client configuration file details (PKI-based server and client certificate authentication)

apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJd01ETXdOekE1TVRNeU1Wb1hEVE13TURNd05UQTVNVE15TVZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHl4Ckx2M25ESzZHaDgxN1pjWmpqUVV5em13RlVvdzZhZDV1T1Jabzg2Q0tsNW52RnF3VjRYL2E4OGx2S1grc2xqWDkKSDZGR2Y2bm1uM2JMTnlXWWEreThGcllUMHBQR2x3aG5qWE1WSkJlUW9SS2NiK2hySERPZlNGZ0xsZjQ0TWR1VwpPd3Vmb2VTYnJpL3hoZ0ExMXhqbStmVGJNV3ZkNkZVM0h6ZW9WeEtsdVJNcmJVL0YySHFVN0R1ZEV6dUNQUWFsCk1OOUxiblZJcUtwREp5VzhmODY1V29MUHJlWjhMZkZqMVQvMXl2ZEk1dkJwTFBKc0NZUndLdndSTEhZajAzTHMKRVA5QlpuRkhNRDYwV3RuZXc4bkdaRjJkWTdIRHZRa1V2M2hoemtVMXRLa3BncWhvM2tCUytoUUNwUEpLMzZLMgplOG9aT2NrTDJsYjJzTmpBck84Q0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJTnFSMXYwUGVPKy9TR05OcXN6S2MzNHk5RGkKVjA5dFZoemRRNEV6aGtoM0ZOS0RRMDZ0VTNPcUw2dzREano2SnlwSW9IaGVsTXVxVmJtY0V5WE9WNzYwZ0hPRQpJaWJ0ZlJhcVdPMVc2RXE0NklQbjEwZkFWNzRwNjhVcWdQdjkra0RSb2grYWhobFJFTGJJdTJNcjAzNHBjcWduClZSK01lWGZ6Q0VvalF3dzd0ZVJGNnpMTCtQa0duNHI4L24rNFNGUjJIK2lDVCtiVzNxZWdCYi9MWWwyTmNMTHMKVDEvcnROZnFTaEIyV2dYbXZKUkl2YXRIWWtUdUZCN1IwZ0pkQUJJWXdkSGlpbVN4TkdDK05WRzIzL3BDdmRKUApFcjFPd2xuWFBMSStiOHpXNDNEanVjd0pPdTY0alZEVmduNUpJUDZqNjRuYnN2eC9VSkIvOUZNK0lVST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    server: https://172.31.43.3:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJY0trbWVQMXNaZnd3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TURBek1EY3dPVEV6TWpGYUZ3MHlNVEF6TURjd09URXpNak5hTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXhTbFZmb1IyVTQ2UGdCbzAKR2kyL2NROFEzVldCcVpaNzRwM3cxZ1pDS2dzaUhya3RGWTdrTm52Y3hLNXVPRjZIN1YxS0JrYmRUNXZvVlZ2YQpFRlY3TU5RZUZ6RDEzWkFKK2dOVFN5RFUrY21qT2xnQW1xMktZeHdKbTNBNUdnNFRSbVpUN01mS3FxMVc4V2lxClZlWkY1cnViUkdpb3Z0WWR5L3BHUEs1b0dJaWtpd2w0QU9SMXFGRG80ejR3SmtyMEd5OUxSSzhNZ0RkeEhrSk0KQklrZ2QrbnFpODBGZUpLM2JzWTBjUG9LYk9QbEx4Vm9XQW5iUWEyNjVqYXBQbitNdEpKWkdRelFwYXhranE5RApvek1Pa3pnV0dQMFZKcC9CUXFINGI5NTFXaUFpNTMwbVlvVTVRUDJwaFR6amtUbG1PQlErd3hoZDNKaU9TdjUwCkVmdkdHUUlEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFIdSt5MjRxa2F0Y21rZkJYRUtrUXg1SUdvNm9Ud0FIcnRqdQo5SUw1MTZ4cVZPMlUvblAwM3hqbHBVdkFSR1dSU3czRjZsanNkUTM5VS9DMGQ2SVNGT0t4K2VkMFE5MVptYW03CnNib0liaXJSeDdVa3ErdThoS3dRK1Zad1Z0akdLUWYwclB2STFkb2drcHJldkF2Myt3OUdld3p5Y0NqemxIbE0KU09pdFdYYkdpdzBoWmk3a25lYmdMQVEvdkVVSlFrNFNVK21oMTJIaVNZY0R2WlJOZkJOUzNONnpPMnZXUGFrcwpFMVIvZ1BBTmlMMllTSXpnQVAwSyszTzJGVzc1SndLa3dXUlNEM1NIZWQxbTZIYlVGcTlBUEdWOXB1eHJTZXJoCkF0T2QzbTdIUnRCS3Q1L29ZaUNva1NBRjZIR1hJcCtEYTFBMFZQRkU0YlVkQjl5MUlHWT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeFNsVmZvUjJVNDZQZ0JvMEdpMi9jUThRM1ZXQnFaWjc0cDN3MWdaQ0tnc2lIcmt0CkZZN2tObnZjeEs1dU9GNkg3VjFLQmtiZFQ1dm9WVnZhRUZWN01OUWVGekQxM1pBSitnTlRTeURVK2Ntak9sZ0EKbXEyS1l4d0ptM0E1R2c0VFJtWlQ3TWZLcXExVzhXaXFWZVpGNXJ1YlJHaW92dFlkeS9wR1BLNW9HSWlraXdsNApBT1IxcUZEbzR6NHdKa3IwR3k5TFJLOE1nRGR4SGtKTUJJa2dkK25xaTgwRmVKSzNic1kwY1BvS2JPUGxMeFZvCldBbmJRYTI2NWphcFBuK010SkpaR1F6UXBheGtqcTlEb3pNT2t6Z1dHUDBWSnAvQlFxSDRiOTUxV2lBaTUzMG0KWW9VNVFQMnBoVHpqa1RsbU9CUSt3eGhkM0ppT1N2NTBFZnZHR1FJREFRQUJBb0lCQVFDQ2s1bDNyU3JncyszKwpIVnljYWVmOGJNbnlqSXJQVWtiQ0UzQkpqdU9MRE15UUpIdmpaenRsaWlyd1o4Vy90M3Uyaks1VjhlRG90SXp1CjIySlVwd2hya2xCTGM3V2lBNTlYNFpQc2tkWDdpTHQrRElKNTdxMVVibUUrZk5pVWxQWFhEalpPL3hNT2JyYkMKTTF0OGdJR1RDblVPblhJRTBiSHlRZEw2cFZkenh3Ri9EeFNNTy9zOGxLOEh3K0RzT0xxU3FPbHoyOUpuYk9CeAp1aEMzK3VMalc4Rmpsblh6K25JQWRaWFZoRkp0dG43a1dkak1jZXkyTGZCc1NZbGZlWlhZaTRGTE8xbmNPWGpuCkYwLzNhU2g0UmtPeXZvZDZRSEVxTmFnS0ZPOUZqd29hQzRmWkxLQjBrTG16UlZYa1BiR2lDRXB1N1ozSEw0c3UKaFRaYTNUekJBb0dCQU8zMXlBWDVtYTR2U3FlK2V5eEx2L201WEhtb2QweDhXNUFkcU51ZzNuRjdUSE4zMXppbQpmYVBwTjd4R2lwcXNwMVlGQzBheC9FZDNJYW1RcWVSRlRtTHgrRmttb3NNSThBbUV2U0EvL0JTVWVhYTUzeWtwCkt1NXEzNFBWWW5OSXZpcWpTM1ZITERGckw5MlUzNnVBTk9uMTJwZUw3ek1kOXVOT0srNlV3L20xQW9HQkFOUWIKd0g0RWRUbVAwS2V5V0hmYlBheFhxSVJqV2xLeFhHTU5JcnRVZWNLQ0NadWFTNnE5TFYxWk5KZkMyamN3TFRKMApDMVB2RkNjWjAwRUFScGlkS2lYL0ZaQzloRHZ6TkpsUnRseGs0aGVZVUVoa0lQL1RtcVUvTWZhSEhBREhlbDNCCkNPL1BuUnU5Y3g0NmwxZjBOcm5XRVJoa2J5TTJ4Mzc1ek5xb0tJbFZBb0dBUzhxKy9QZzFOTCtuWGFwVC9SWGIKZmFUR2laRlkvaW1WMkY4NkMwby96NUZnRmw4VFU5M2pvck9EcHhvb3gzODZoVEZ5R0FCVXhFWnptRmlWWkRtVwo3L2oyQ3g4OU5EWENqcVdTdjVUaHE0Um5BdTJzNEtWV0lUNDFGdjUrTHczNlZBWlM0SFhjNDVpcVZEODR4cDA5ClBVK3JZaDJXQUlnSXZQbUhFS1NkandrQ2dZQm53dHU3eWZwK21qZjhrV1p0MjdhajVJM3ZsWnJOOFMyODF1UXkKdC9TSWpveWNyakp0NS9XVlFOcFZrMkNrdHRDbGFkZFF6QmdUdUxKN2plTDdMWWM4NXpocGdneDZOMU4zM1YxVQpmWldNN1ZuNHorTEV3NE5YYXo3SjF2Wi8reFdGWDdVN2UxamtCUjJYb0JvQlVOcWt0bS9PZXZOVFNxejFGTVorCkFOMHpzUUtCZ1FDaDROSlEvVjhhc3prOURnZ2F5bnZ1Z2JWWVg1R0lFNGRSRng3Z3dXek5BckI0V1pUODVHeDgKSzByN3BLdTJsYmh2OFE1UU9GdFFhS0JwcCtjb1g2a3cvbTJZdWdYeVdiREpScEY4ODJXbkQzYWhvbW10WTlXZgpOWmJkeGRXNk8xZ1dURTg1ODV3YW5uOWFZR3g5Q21xNDJ4Sk9SaURPakFZWWEyR3phTHI2SHc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
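
The embedded client certificate can be decoded locally with standard tools to inspect the identity it carries (it should show the kubernetes-admin user in the system:masters group); a sketch assuming the default kubeconfig location:

grep client-certificate-data ~/.kube/config | awk '{print $2}' | base64 -d | openssl x509 -noout -subject -issuer -dates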

Basic commands of the kubectl command-line tool

[centos@k8s-01 ~]$ kubectl 
kubectl controls the Kubernetes cluster manager.

 Find more information at: https://kubernetes.io/docs/reference/kubectl/overview/

Basic Commands (Beginner):
  create         Create a resource from a file or from stdin.
  expose         Take a replication controller, service, deployment or pod and expose it as a new
Kubernetes Service
  run            Run a particular image on the cluster
  set            Set specific features on objects

Basic Commands (Intermediate):
  explain        Documentation of resources
  get            Display one or many resources
  edit           Edit a resource on the server
  delete         Delete resources by filenames, stdin, resources and names, or by resources and
label selector

Deploy Commands:
  rollout        Manage the rollout of a resource
  scale          Set a new size for a Deployment, ReplicaSet or Replication Controller
  autoscale      Auto-scale a Deployment, ReplicaSet, or ReplicationController

Cluster Management Commands:
  certificate    Modify certificate resources.
  cluster-info   Display cluster info
  top            Display Resource (CPU/Memory/Storage) usage.
  cordon         Mark node as unschedulable
  uncordon       Mark node as schedulable
  drain          Drain node in preparation for maintenance
  taint          Update the taints on one or more nodes

Troubleshooting and Debugging Commands:
  describe       Show details of a specific resource or group of resources
  logs           Print the logs for a container in a pod
  attach         Attach to a running container
  exec           Execute a command in a container
  port-forward   Forward one or more local ports to a pod
  proxy          Run a proxy to the Kubernetes API server
  cp             Copy files and directories to and from containers.
  auth           Inspect authorization

Advanced Commands:
  diff           Diff live version against would-be applied version
  apply          Apply a configuration to a resource by filename or stdin
  patch          Update field(s) of a resource using strategic merge patch
  replace        Replace a resource by filename or stdin
  wait           Experimental: Wait for a specific condition on one or many resources.
  convert        Convert config files between different API versions
  kustomize      Build a kustomization target from a directory or a remote url.

Settings Commands:
  label          Update the labels on a resource
  annotate       Update the annotations on a resource
  completion     Output shell completion code for the specified shell (bash or zsh)

Other Commands:
  api-resources  Print the supported API resources on the server
  api-versions   Print the supported API versions on the server, in the form of "group/version"
  config         Modify kubeconfig files
  plugin         Provides utilities for interacting with plugins.
  version        Print the client and server version information

Usage:
  kubectl [flags] [options]

Use "kubectl <command> --help" for more information about a given command.
Use "kubectl options" for a list of global command-line options (applies to all commands).
[centos@k8s-01 ~]$

Reference

https://www.digitalocean.com/community/tutorials/how-to-create-a-kubernetes-cluster-using-kubeadm-on-centos-7
March 4, 2020
 

Set the hostname and disable SELinux on host k8s-1

[root@ip-172-31-37-25 ~]# hostnamectl set-hostname k8s-1
[root@ip-172-31-37-25 ~]# sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
[root@ip-172-31-37-25 ~]# init 6

Set up the Docker repository on host k8s-1

[root@k8s-1 ~]# yum -y install yum-utils
[root@k8s-1 ~]# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
Loaded plugins: fastestmirror
adding repo from: https://download.docker.com/linux/centos/docker-ce.repo
grabbing file https://download.docker.com/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo
repo saved to /etc/yum.repos.d/docker-ce.repo
[root@k8s-1 ~]#

Check the available docker-ce version

[root@k8s-1 ~]# yum info docker-ce
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: d36uatko69830t.cloudfront.net
 * extras: d36uatko69830t.cloudfront.net
 * updates: d36uatko69830t.cloudfront.net
docker-ce-stable                                                                   | 3.5 kB  00:00:00     
(1/2): docker-ce-stable/x86_64/primary_db                                          |  40 kB  00:00:00     
(2/2): docker-ce-stable/x86_64/updateinfo                                          |   55 B  00:00:00     
Available Packages
Name        : docker-ce
Arch        : x86_64
Epoch       : 3
Version     : 19.03.7
Release     : 3.el7
Size        : 25 M
Repo        : docker-ce-stable/x86_64
Summary     : The open-source application container engine
URL         : https://www.docker.com
License     : ASL 2.0
Description : Docker is a product for you to build, ship and run any application as a
            : lightweight container.
            : 
            : Docker containers are both hardware-agnostic and platform-agnostic. This means
            : they can run anywhere, from your laptop to the largest cloud compute instance and
            : everything in between - and they don't require you to use a particular
            : language, framework or packaging system. That makes them great building blocks
            : for deploying and scaling web apps, databases, and backend services without
            : depending on a particular stack or provider.

[root@k8s-1 ~]#

Install the Docker service

[root@k8s-1 ~]# yum -y install docker-ce

Modify the Docker daemon configuration file

[root@k8s-1 ~]# mkdir -p /etc/docker
[root@k8s-1 ~]# cat > /etc/docker/daemon.json <<EOF
> {
>   "exec-opts": ["native.cgroupdriver=systemd"],
>   "log-driver": "json-file",
>   "log-opts": {
>     "max-size": "100m"
>   },
>   "storage-driver": "overlay2",
>   "storage-opts": [
>     "overlay2.override_kernel_check=true"
>   ]
> }
> EOF
[root@k8s-1 ~]#

Enable and start the service

[root@k8s-1 ~]# mkdir -p /etc/systemd/system/docker.service.d
[root@k8s-1 ~]# 
[root@k8s-1 ~]# systemctl daemon-reload
[root@k8s-1 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@k8s-1 ~]# systemctl start docker
[root@k8s-1 ~]#
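
An optional check (not in the original log) to confirm that the systemd cgroup driver from daemon.json took effect:

docker info | grep -i 'cgroup driver'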

Adjust kernel parameters

[root@k8s-1 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> EOF
[root@k8s-1 ~]# sysctl --system
* Applying /usr/lib/sysctl.d/00-system.conf ...
net.bridge.bridge-nf-call-ip6tables = 0
net.bridge.bridge-nf-call-iptables = 0
net.bridge.bridge-nf-call-arptables = 0
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
* Applying /etc/sysctl.conf ...
[root@k8s-1 ~]#

Add the Kubernetes repository and install the packages

[root@k8s-1 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
> [kubernetes]
> name=Kubernetes
> baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=1
> repo_gpgcheck=1
> gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
> exclude=kube*
> EOF
[root@k8s-1 ~]#

[root@k8s-1 ~]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

[root@k8s-1 ~]# systemctl enable --now kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@k8s-1 ~]#

---

Set the hostname and disable SELinux on host k8s-2

[root@ip-172-31-45-40 ~]# hostnamectl set-hostname k8s-2
[root@ip-172-31-45-40 ~]# sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
[root@ip-172-31-45-40 ~]# init 6

Set up the Docker repository on host k8s-2

[root@k8s-2 ~]# yum -y install yum-utils
[root@k8s-2 ~]# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
Loaded plugins: fastestmirror
adding repo from: https://download.docker.com/linux/centos/docker-ce.repo
grabbing file https://download.docker.com/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo
repo saved to /etc/yum.repos.d/docker-ce.repo
[root@k8s-2 ~]#

Install the Docker service

[root@k8s-2 ~]# yum -y install docker-ce

Modify the Docker daemon configuration file

[root@k8s-2 ~]# mkdir -p /etc/docker
[root@k8s-2 ~]# cat > /etc/docker/daemon.json <<EOF
> {
>   "exec-opts": ["native.cgroupdriver=systemd"],
>   "log-driver": "json-file",
>   "log-opts": {
>     "max-size": "100m"
>   },
>   "storage-driver": "overlay2",
>   "storage-opts": [
>     "overlay2.override_kernel_check=true"
>   ]
> }
> EOF
[root@k8s-2 ~]#

Enable and start the service

[root@k8s-2 ~]# mkdir -p /etc/systemd/system/docker.service.d
[root@k8s-2 ~]# 
[root@k8s-2 ~]# systemctl daemon-reload
[root@k8s-2 ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@k8s-2 ~]# systemctl start docker
[root@k8s-2 ~]#

Adjust kernel parameters

[root@k8s-2 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> EOF
[root@k8s-2 ~]# sysctl --system
* Applying /usr/lib/sysctl.d/00-system.conf ...
net.bridge.bridge-nf-call-ip6tables = 0
net.bridge.bridge-nf-call-iptables = 0
net.bridge.bridge-nf-call-arptables = 0
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
* Applying /etc/sysctl.conf ...
[root@k8s-2 ~]#

Add the Kubernetes repository and install the packages

[root@k8s-2 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
> [kubernetes]
> name=Kubernetes
> baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=1
> repo_gpgcheck=1
> gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
> exclude=kube*
> EOF
[root@k8s-2 ~]# 

[root@k8s-2 ~]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

[root@k8s-2 ~]# systemctl enable --now kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@k8s-2 ~]#

---

Initialize the master node

[root@k8s-1 ~]# kubeadm init --apiserver-advertise-address=172.31.37.25 --pod-network-cidr=10.244.0.0/16
W0304 12:53:01.696504    4233 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0304 12:53:01.696701    4233 validation.go:28] Cannot validate kubelet config - no validator is available
[init] Using Kubernetes version: v1.17.3
[preflight] Running pre-flight checks
        [WARNING Hostname]: hostname "k8s-1" could not be reached
        [WARNING Hostname]: hostname "k8s-1": lookup k8s-1 on 172.31.0.2:53: no such host
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.31.37.25]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-1 localhost] and IPs [172.31.37.25 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-1 localhost] and IPs [172.31.37.25 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W0304 12:53:27.750650    4233 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
W0304 12:53:27.751872    4233 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 15.504674 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.17" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-1 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: w13kyt.okovvot763i4tnbm
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.31.37.25:6443 --token w13kyt.okovvot763i4tnbm \
    --discovery-token-ca-cert-hash sha256:16a89254e6f2df256954d3dccb24aadbc8ad3f40b3f806cf53d67b715a5284c8 
[root@k8s-1 ~]#

Configure the local kubectl environment

[root@k8s-1 ~]# mkdir $HOME/.kube
[root@k8s-1 ~]# cp /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-1 ~]#

Download the Flannel configuration file

[root@k8s-1 ~]# curl -O https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100 14416  100 14416    0     0  32134      0 --:--:-- --:--:-- --:--:-- 32250
[root@k8s-1 ~]#

Deploy the Flannel network

[root@k8s-1 ~]# kubectl apply -f kube-flannel.yml 
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
[root@k8s-1 ~]#

Check cluster component status

[root@k8s-1 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
[root@k8s-1 ~]# 

---

Join node k8s-2 to the cluster

[root@k8s-2 ~]# kubeadm join 172.31.37.25:6443 --token w13kyt.okovvot763i4tnbm \
> --discovery-token-ca-cert-hash sha256:16a89254e6f2df256954d3dccb24aadbc8ad3f40b3f806cf53d67b715a5284c8
W0304 13:00:11.341421    4312 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
        [WARNING Hostname]: hostname "k8s-2" could not be reached
        [WARNING Hostname]: hostname "k8s-2": lookup k8s-2 on 172.31.0.2:53: no such host
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.17" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@k8s-2 ~]#

Check cluster node status on master node k8s-1

[root@k8s-1 ~]# kubectl get nodes
NAME    STATUS   ROLES    AGE     VERSION
k8s-1   Ready    master   7m58s   v1.17.3
k8s-2   Ready    <none>   86s     v1.17.3
[root@k8s-1 ~]#