April 21, 2020
Base environment installation script (for Amazon AWS EC2, CentOS 7)
#!/bin/bash
#
# Disable SELinux now and on subsequent boots
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
#
# Let bridged traffic pass through iptables (required by kube-proxy/CNI)
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# Apply the settings from the file just written
# (a plain 'sysctl -p' would only read /etc/sysctl.conf)
sysctl -p /etc/sysctl.d/k8s.conf
#
# Install Docker CE from the official repository
yum makecache
yum install -y yum-utils
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install -y docker-ce docker-ce-cli containerd.io
#
# Cluster host name resolution
cat <<EOF >> /etc/hosts
172.31.3.209 k8s01
172.31.8.132 k8s02
172.31.10.229 k8s03
EOF
#
# Configure Docker to use the systemd cgroup driver (recommended by kubeadm)
mkdir /etc/docker
cat <<EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
#
systemctl daemon-reload
systemctl enable docker
systemctl restart docker
#
# Add the Kubernetes yum repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
#
# Install the kubeadm toolchain and enable kubelet
yum install -y kubectl kubelet kubeadm
systemctl enable kubelet
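Note that the script does not disable swap. The stock Amazon EC2 CentOS 7 image ships without swap, so kubeadm's preflight checks pass as-is; on hosts that do have swap enabled, it must be turned off first or kubeadm will refuse to run. A minimal sketch, assuming a space-separated swap entry in /etc/fstab:

# turn swap off now, and comment out the fstab entry so it stays off after reboot
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab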
Run the script
[root@k8s01 ~]# vi deploy.sh
[root@k8s01 ~]# chmod 700 deploy.sh
[root@k8s01 ~]# ./deploy.sh
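The same script is run on all three nodes (k8s01, k8s02, k8s03). If it completes without errors, the container runtime and the kubeadm toolchain should be in place; a quick sanity check on each node might look like this (commands only, output omitted):

docker --version
systemctl is-active docker
kubeadm version -o short
kubectl version --client --short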
Initialize the master node
kubeadm init --apiserver-advertise-address=172.31.14.12 --pod-network-cidr=10.244.0.0/16

[root@k8s01 ~]# kubeadm init --apiserver-advertise-address=172.31.14.12 --pod-network-cidr=10.244.0.0/16
Configure the local kubectl environment
[root@k8s01 ~]# mkdir -p $HOME/.kube
[root@k8s01 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s01 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
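With the admin kubeconfig copied into place, kubectl on the master can reach the API server. A quick check, for example:

kubectl cluster-info
kubectl get nodes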
Join the worker nodes to the cluster
kubeadm join 172.31.14.12:6443 --token ghr4s0.13nh5q6f6ywt2oso \
    --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
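The token and CA certificate hash above are printed at the end of the kubeadm init output on the master. If that output was not saved, an equivalent join command can be regenerated on the master at any time:

kubeadm token create --print-join-command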
Node 2 joins
[root@k8s02 ~]# kubeadm join 172.31.14.12:6443 --token ghr4s0.13nh5q6f6ywt2oso \
> --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
W0420 10:23:48.432125    9198 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@k8s02 ~]#
Node 3 joins
[root@k8s03 ~]# kubeadm join 172.31.14.12:6443 --token ghr4s0.13nh5q6f6ywt2oso \
> --discovery-token-ca-cert-hash sha256:d435ee7f3795a10b58762be903a78a99c719e3520fb029d718505095b37e9859
W0420 10:24:14.829097    9202 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@k8s03 ~]#
Install the flannel network
[root@k8s01 ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
[root@k8s01 ~]#
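Flannel is deployed as a DaemonSet, so one pod (the amd64 variant on these EC2 instances) is scheduled onto each node; the nodes only switch from NotReady to Ready once their flannel pod is running. Rollout can be watched from the master, for example:

kubectl -n kube-system get daemonset
kubectl -n kube-system get pods -o wide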
Check node status
[root@k8s01 ~]# kubectl get nodes
NAME    STATUS   ROLES    AGE   VERSION
k8s01   Ready    master   12m   v1.18.2
k8s02   Ready    <none>   10m   v1.18.2
k8s03   Ready    <none>   10m   v1.18.2
[root@k8s01 ~]#
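The worker nodes show <none> under ROLES because kubeadm only labels the control-plane node. If a worker role label is wanted in this column, it can be added manually, e.g.:

kubectl label node k8s02 node-role.kubernetes.io/worker=
kubectl label node k8s03 node-role.kubernetes.io/worker=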
Check cluster component status
[root@k8s01 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}
[root@k8s01 ~]#
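kubectl get cs queries the ComponentStatus API, which still reports scheduler, controller-manager and etcd health on 1.18 but is deprecated in later releases. Much the same picture is available by looking at the control-plane pods directly, for example:

kubectl -n kube-system get pods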
List the local images
[root@k8s01 ~]# docker image ls
REPOSITORY                           TAG             IMAGE ID       CREATED        SIZE
k8s.gcr.io/kube-proxy                v1.18.2         0d40868643c6   3 days ago     117MB
k8s.gcr.io/kube-scheduler            v1.18.2         a3099161e137   3 days ago     95.3MB
k8s.gcr.io/kube-apiserver            v1.18.2         6ed75ad404bd   3 days ago     173MB
k8s.gcr.io/kube-controller-manager   v1.18.2         ace0a8c17ba9   3 days ago     162MB
quay.io/coreos/flannel               v0.12.0-amd64   4e9f801d2217   5 weeks ago    52.8MB
k8s.gcr.io/pause                     3.2             80d28bedfe5d   2 months ago   683kB
k8s.gcr.io/coredns                   1.6.7           67da37a9a360   2 months ago   43.8MB
k8s.gcr.io/etcd                      3.4.3-0         303ce5db0e90   5 months ago   288MB
[root@k8s01 ~]#
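These images were pulled automatically by kubeadm init (and kubeadm join on the workers). If the download should happen ahead of time, for instance on a slow or metered link, kubeadm can list and pre-pull them before initialization:

kubeadm config images list
kubeadm config images pull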