Kubernetes (11) -- Deploying a Highly Available k8s Cluster with kubeadm

1. System initialization

System initialization reference: https://www.cnblogs.com/hujinzhong/p/12251486.html

Node preparation:

Node       IP
master01   10.0.0.11
master02   10.0.0.12
master03   10.0.0.13
node01     10.0.0.21
node02     10.0.0.22
k8s-vip    10.0.0.100

Using one master node as an example:

1) Hostname and hosts resolution

[root@master01 ~]# hostnamectl set-hostname k8s-master01
[root@master01 ~]# vim /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.0.11  master01
10.0.0.12  master02
10.0.0.13  master03
10.0.0.21  node01
10.0.0.22  node02
10.0.0.100 k8s-vip
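
The same hosts entries are needed on every node (master01 was edited above). A minimal sketch for appending them to the remaining nodes in one pass, assuming passwordless root SSH is already set up; the IP list matches the table above:

for ip in 10.0.0.12 10.0.0.13 10.0.0.21 10.0.0.22; do
  # append the cluster entries to /etc/hosts on each remaining node
  ssh root@$ip "cat >> /etc/hosts" <<'EOF'
10.0.0.11  master01
10.0.0.12  master02
10.0.0.13  master03
10.0.0.21  node01
10.0.0.22  node02
10.0.0.100 k8s-vip
EOF
done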

2) Install dependency packages

[root@master01 ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
[root@master01 ~]# yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git

3) Firewall / swap / SELinux

# Firewall
[root@master01 ~]# systemctl stop firewalld && systemctl disable firewalld
[root@master01 ~]# yum install -y iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save

# SELinux
[root@master01 ~]# setenforce 0 && sed -i 's#^SELINUX=.*#SELINUX=disabled#g' /etc/selinux/config
setenforce: SELinux is disabled
[root@master01 ~]# getenforce
Disabled

# Disable swap
[root@master01 ~]# swapoff -a && sed -r -i '/swap/s@(.*)@#\1@g' /etc/fstab

4) Kernel upgrade

[root@master01 ~]# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
[root@master01 ~]# yum --enablerepo=elrepo-kernel install -y kernel-lt
[root@master01 ~]# grub2-set-default "CentOS Linux (4.4.182-1.el7.elrepo.x86_64) 7 (Core)"   # use the exact menu entry name of the kernel that was actually installed
[root@master01 ~]# reboot
[root@k8s-master01 ~]# uname -r
4.4.213-1.el7.elrepo.x86_64

5) Disable NUMA

[root@k8s-master01 ~]# cp /etc/default/grub{,.bak}
[root@k8s-master01 ~]# vim /etc/default/grub # add the `numa=off` parameter to the GRUB_CMDLINE_LINUX line
GRUB_CMDLINE_LINUX="biosdevname=0 net.ifnames=0 rhgb quiet numa=off"
[root@k8s-master01 ~]# cp /boot/grub2/grub.cfg{,.bak}
[root@k8s-master01 ~]# grub2-mkconfig -o /boot/grub2/grub.cfg
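
A quick sanity check, first that the regenerated grub.cfg picked up the parameter and, after the next reboot, that the running kernel was booted with it:

grep numa=off /boot/grub2/grub.cfg    # should match in the regenerated config
grep numa=off /proc/cmdline           # run after reboot; should match once booted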

6) Kernel parameter tuning

cat > /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# Avoid using swap; it is only used when the system would otherwise OOM
vm.swappiness=0
# Do not check whether enough physical memory is available
vm.overcommit_memory=1
# Do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF

[root@k8s-master01 ~]# sysctl -p /etc/sysctl.d/kubernetes.conf
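
Note that the two net.bridge.* keys only exist once the br_netfilter module is loaded (done in step 10); if sysctl -p complains about them, load the module first and re-run it. A quick spot check that the values took effect:

modprobe br_netfilter    # only needed if the bridge keys are reported as missing
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward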

7) Time zone adjustment

[root@k8s-master01 ~]# timedatectl set-timezone Asia/Shanghai
[root@k8s-master01 ~]# timedatectl set-local-rtc 0
[root@k8s-master01 ~]# systemctl restart rsyslog && systemctl restart crond
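
ntp/ntpdate were installed in step 2 and cluster certificates are time-sensitive, so it is worth syncing the clocks as well. A sketch, assuming the Aliyun NTP server ntp1.aliyun.com is reachable (any reachable NTP server will do):

ntpdate ntp1.aliyun.com                         # one-off clock sync
systemctl start ntpd && systemctl enable ntpd   # keep time in sync afterwards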

8) Disable unnecessary services

[root@k8s-master01 ~]# systemctl stop postfix && systemctl disable postfix

9) Configure rsyslogd and systemd journald

[root@k8s-master01 ~]# mkdir /var/log/journal
[root@k8s-master01 ~]# mkdir /etc/systemd/journald.conf.d

cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# Persist logs to disk
Storage=persistent
# Compress historical logs
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# Maximum disk usage: 10G
SystemMaxUse=10G
# Maximum size of a single log file: 200M
SystemMaxFileSize=200M
# Keep logs for 2 weeks
MaxRetentionSec=2week
# Do not forward logs to syslog
ForwardToSyslog=no
EOF

[root@master03 ~]# systemctl restart systemd-journald

10) Prerequisites for kube-proxy to use IPVS

[root@k8s-master01 ~]# modprobe br_netfilter

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

[root@k8s-master01 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

11) Install Docker

[root@k8s-master01 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@k8s-master01 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@k8s-master01 ~]# yum update -y && yum install -y docker-ce

[root@k8s-master01 ~]# mkdir /etc/docker -p
[root@k8s-master01 ~]# mkdir -p  /etc/systemd/system/docker.service.d

cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
    "max-size": "100m"
    }
}
EOF

[root@master03 ~]# systemctl daemon-reload && systemctl start docker && systemctl enable docker
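
daemon.json switches Docker to the systemd cgroup driver, which is what the kubelet expects here; a quick check after the restart:

docker info 2>/dev/null | grep -i "cgroup driver"    # expect: Cgroup Driver: systemd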

12) Import the required images

[root@k8s-master01 ~]# mkdir /root/k8s -p
[root@k8s-master01 ~]# cd k8s/
[root@k8s-master01 k8s]# docker load -i keepalived.tar
[root@k8s-master01 k8s]# docker load -i haproxy.tar
[root@k8s-master01 k8s]# docker images
REPOSITORY                           TAG                 IMAGE ID            CREATED             SIZE
k8s.gcr.io/kube-apiserver            v1.15.1             68c3eb07bfc3        6 months ago        207MB
k8s.gcr.io/kube-scheduler            v1.15.1             b0b3c4c404da        6 months ago        81.1MB
k8s.gcr.io/kube-proxy                v1.15.1             89a062da739d        6 months ago        82.4MB
k8s.gcr.io/kube-controller-manager   v1.15.1             d75082f1d121        6 months ago        159MB
k8s.gcr.io/coredns                   1.3.1               eb516548c180        12 months ago       40.3MB
k8s.gcr.io/etcd                      3.3.10              2c4adeb21b4f        14 months ago       258MB
wise2c/keepalived-k8s                latest              0ba6a7862982        20 months ago       14MB
wise2c/haproxy-k8s                   latest              fde31577093d        20 months ago       71.1MB
k8s.gcr.io/pause                     3.1                 da86e6ba6ca1        2 years ago         742kB
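
Every master needs the same images (the workers need at least kube-proxy, pause and the flannel image). A minimal sketch for copying the already-loaded images to the other masters over SSH, assuming passwordless root SSH; adjust the list to what each node actually needs:

for ip in 10.0.0.12 10.0.0.13; do
  for img in k8s.gcr.io/kube-apiserver:v1.15.1 k8s.gcr.io/kube-controller-manager:v1.15.1 \
             k8s.gcr.io/kube-scheduler:v1.15.1 k8s.gcr.io/kube-proxy:v1.15.1 \
             k8s.gcr.io/etcd:3.3.10 k8s.gcr.io/coredns:1.3.1 k8s.gcr.io/pause:3.1 \
             wise2c/keepalived-k8s:latest wise2c/haproxy-k8s:latest; do
    # stream each image from master01 and load it on the remote host
    docker save $img | ssh root@$ip docker load
  done
done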

2. Start the HAProxy container

[root@k8s-master01 lb]# cat start-haproxy.sh 
#!/bin/bash
MasterIP1=10.0.0.11
MasterIP2=10.0.0.12
MasterIP3=10.0.0.13
MasterPort=6443

docker run -d --restart=always --name HAProxy-K8S -p 6444:6444 \
        -e MasterIP1=$MasterIP1 \
        -e MasterIP2=$MasterIP2 \
        -e MasterIP3=$MasterIP3 \
        -e MasterPort=$MasterPort \
        -v /data/lb/etc/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg \
        wise2c/haproxy-k8s

[root@k8s-master01 lb]# cat /data/lb/etc/haproxy.cfg
global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
maxconn 4096
#chroot /usr/share/haproxy
#user haproxy
#group haproxy
daemon

defaults
    log     global
    mode    http
    option  httplog
    option  dontlognull
    retries 3
    option redispatch
    timeout connect  5000
    timeout client  50000
    timeout server  50000

frontend stats-front
  bind *:8081
  mode http
  default_backend stats-back

frontend fe_k8s_6444
  bind *:6444
  mode tcp
  timeout client 1h
  log global
  option tcplog
  default_backend be_k8s_6443
  acl is_websocket hdr(Upgrade) -i WebSocket
  acl is_websocket hdr_beg(Host) -i ws

backend stats-back
  mode http
  balance roundrobin
  stats uri /haproxy/stats
  stats auth pxcstats:secret

backend be_k8s_6443
  mode tcp
  timeout queue 1h
  timeout server 1h
  timeout connect 1h
  log global
  balance roundrobin
  server rancher01 10.0.0.11:6443

[root@k8s-master01 lb]# ./start-haproxy.sh
[root@k8s-master01 lb]# netstat -lntp|grep 6444
tcp6       0      0 :::6444                 :::*                    LISTEN      40073/docker-proxy
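
The config also exposes an HAProxy stats page on port 8081 (stats-front/stats-back above); checking it with the credentials from the config is a convenient way to see the backend state:

curl -su pxcstats:secret "http://10.0.0.11:8081/haproxy/stats;csv"   # one CSV line per frontend/backend/server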

3. Start the Keepalived container

[root@k8s-master01 lb]# cat start-keepalived.sh
#!/bin/bash
VIRTUAL_IP=10.0.0.100
INTERFACE=eth0
NETMASK_BIT=24
CHECK_PORT=6444
RID=10
VRID=160
MCAST_GROUP=224.0.0.18

docker run -itd --restart=always --name=Keepalived-K8S \
        --net=host --cap-add=NET_ADMIN \
        -e VIRTUAL_IP=$VIRTUAL_IP \
        -e INTERFACE=$INTERFACE \
        -e CHECK_PORT=$CHECK_PORT \
        -e RID=$RID \
        -e VRID=$VRID \
        -e NETMASK_BIT=$NETMASK_BIT \
        -e MCAST_GROUP=$MCAST_GROUP \
        wise2c/keepalived-k8s

[root@k8s-master01 lb]# ./start-keepalived.sh 
3792352f22407eed1962a213ec82b1f00935b55b951704064c86142998bc4594

[root@k8s-master01 lb]# ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:97:61:29 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.11/24 brd 10.0.0.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet 10.0.0.100/24 scope global secondary eth0
       valid_lft forever preferred_lft forever
3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default 
    link/ether 02:42:9f:ca:92:e2 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
5: veth91153da@if4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default 
    link/ether 92:fb:b5:91:09:bf brd ff:ff:ff:ff:ff:ff link-netnsid 0
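
Only one master holds the VIP at a time. Once the HAProxy and Keepalived containers are running on all three masters, a quick way to see where the VIP currently lives (assuming root SSH to each master):

for ip in 10.0.0.11 10.0.0.12 10.0.0.13; do
  echo -n "$ip: "
  ssh root@$ip "ip addr show eth0 | grep -q 10.0.0.100 && echo has-VIP || echo -"
done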

4. Install kubeadm

cat  <<EOF >  /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

[root@k8s-master01 lb]# yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
[root@k8s-master01 lb]# systemctl enable kubelet.service

5. Initialize the first master node

[root@k8s-master01 k8s]# kubeadm config print init-defaults > kubeadm-config.yaml
# The generated defaults are then edited as follows (advertiseAddress, controlPlaneEndpoint, kubernetesVersion, podSubnet):
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.0.0.11
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "10.0.0.100:6444"  # VIP address
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
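
As the preflight hint in the init log below also mentions, the required control-plane images can be listed or pre-pulled against this config file before running init (in an offline setup like this one, use the list to verify the imported images):

kubeadm config images list --config kubeadm-config.yaml   # images that will be required
kubeadm config images pull --config kubeadm-config.yaml   # optional pre-pull when a registry is reachable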


# Initialize the first master node
[root@k8s-master01 k8s]# kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log
Flag --experimental-upload-certs has been deprecated, use --upload-certs instead
[init] Using Kubernetes version: v1.15.1
[preflight] Running pre-flight checks
	[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 19.03.5. Latest validated version: 18.09
	[WARNING Hostname]: hostname "k8s-master01" could not be reached
	[WARNING Hostname]: hostname "k8s-master01": lookup k8s-master01 on 223.5.5.5:53: no such host
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.11 10.0.0.100]
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master01 localhost] and IPs [10.0.0.11 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master01 localhost] and IPs [10.0.0.11 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 20.508717 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
40d156da637185cb06a0ca91613c49840db34b21e09192e648a16249b66005a3
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 10.0.0.100:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:f54325af8cd8f83189b47814aed0f459ef29cd6aa90313d7f777ad5cacc88ddb \
    --control-plane --certificate-key 40d156da637185cb06a0ca91613c49840db34b21e09192e648a16249b66005a3

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use 
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.0.0.100:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:f54325af8cd8f83189b47814aed0f459ef29cd6aa90313d7f777ad5cacc88ddb

[root@k8s-master01 k8s]# mkdir -p $HOME/.kube
[root@k8s-master01 k8s]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master01 k8s]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

[root@k8s-master01 k8s]# kubectl get node
NAME           STATUS     ROLES    AGE     VERSION
k8s-master01   NotReady   master   2m28s   v1.15.1

# Install flannel
[root@k8s-master01 k8s]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
[root@k8s-master01 k8s]# kubectl create -f kube-flannel.yml
[root@k8s-master01 k8s]# kubectl get pod -n kube-system
NAME                                   READY   STATUS              RESTARTS   AGE
coredns-5c98db65d4-m4c9s               0/1     ContainerCreating   0          11m
coredns-5c98db65d4-pp925               0/1     ContainerCreating   0          11m
etcd-k8s-master01                      1/1     Running             0          11m
kube-apiserver-k8s-master01            1/1     Running             0          11m
kube-controller-manager-k8s-master01   1/1     Running             0          11m
kube-flannel-ds-amd64-7q7g4            1/1     Running             0          3m2s
kube-proxy-zpwfz                       1/1     Running             0          11m
kube-scheduler-k8s-master01            1/1     Running             0          10m
[root@k8s-master01 k8s]# kubectl get node
NAME           STATUS   ROLES    AGE   VERSION
k8s-master01   Ready    master   12m   v1.15.1

6. Join the additional master nodes

  kubeadm join 10.0.0.100:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:f54325af8cd8f83189b47814aed0f459ef29cd6aa90313d7f777ad5cacc88ddb \
    --control-plane --certificate-key 40d156da637185cb06a0ca91613c49840db34b21e09192e648a16249b66005a3

[preflight] Running pre-flight checks
	[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 19.03.5. Latest validated version: 18.09
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[preflight] Running pre-flight checks before initializing the new control plane instance
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[download-certs] Downloading the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [master02 localhost] and IPs [10.0.0.12 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [master02 localhost] and IPs [10.0.0.12 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master02 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.12 10.0.0.100]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[certs] Using the existing "sa" key
[kubeconfig] Generating kubeconfig files
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[check-etcd] Checking that the etcd cluster is healthy
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[etcd] Announced new etcd member joining to the existing etcd cluster
[etcd] Wrote Static Pod manifest for a local etcd member to "/etc/kubernetes/manifests/etcd.yaml"
[etcd] Waiting for the new etcd member to join the cluster. This can take up to 40s
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[mark-control-plane] Marking the node master02 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master02 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]

This node has joined the cluster and a new control plane instance was created:

* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.

To start administering your cluster from this node, you need to run the following as a regular user:

	mkdir -p $HOME/.kube
	sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run 'kubectl get nodes' to see this node join the cluster.

[root@master02 k8s]# mkdir -p $HOME/.kube
[root@master02 k8s]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master02 k8s]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

[root@master02 k8s]# kubectl get node
NAME           STATUS   ROLES    AGE     VERSION
k8s-master01   Ready    master   12h     v1.15.1
master02       Ready    master   2m12s   v1.15.1

# Similarly, master03 joins the control plane with the same command
[root@master03 lb]# kubectl get node
NAME           STATUS   ROLES    AGE     VERSION
k8s-master01   Ready    master   12h     v1.15.1
master02       Ready    master   7m37s   v1.15.1
master03       Ready    master   70s     v1.15.1

[root@master03 lb]# kubectl get pod -n kube-system
NAME                                   READY   STATUS    RESTARTS   AGE
coredns-5c98db65d4-m4c9s               1/1     Running   0          12h
coredns-5c98db65d4-pp925               1/1     Running   8          12h
etcd-k8s-master01                      1/1     Running   1          12h
etcd-master02                          1/1     Running   0          7m32s
etcd-master03                          1/1     Running   0          64s
kube-apiserver-k8s-master01            1/1     Running   7          12h
kube-apiserver-master02                1/1     Running   0          7m33s
kube-apiserver-master03                1/1     Running   0          65s
kube-controller-manager-k8s-master01   1/1     Running   11         12h
kube-controller-manager-master02       1/1     Running   0          7m33s
kube-controller-manager-master03       1/1     Running   0          65s
kube-flannel-ds-amd64-2mt7n            1/1     Running   0          66s
kube-flannel-ds-amd64-4lv5m            1/1     Running   0          7m33s
kube-flannel-ds-amd64-7q7g4            1/1     Running   1          12h
kube-proxy-7qk9g                       1/1     Running   0          66s
kube-proxy-zh9jg                       1/1     Running   0          7m33s
kube-proxy-zpwfz                       1/1     Running   1          12h
kube-scheduler-k8s-master01            1/1     Running   11         12h
kube-scheduler-master02                1/1     Running   0          7m33s
kube-scheduler-master03                1/1     Running   0          65s
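
The uploaded certificates are deleted after two hours (see the init output above). If another master needs to join later, fresh join credentials can be generated on any existing master; a sketch using standard kubeadm commands:

kubeadm token create --print-join-command         # prints a fresh worker join command
kubeadm init phase upload-certs --upload-certs    # re-uploads the certs and prints a new certificate key
# for a master, append to the printed join command:  --control-plane --certificate-key <new key>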

7. Update the HAProxy configuration

With all three masters in the cluster, add master02 and master03 to the be_k8s_6443 backend and recreate the HAProxy container:

[root@k8s-master01 etc]# cat haproxy.cfg 
global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
maxconn 4096
#chroot /usr/share/haproxy
#user haproxy
#group haproxy
daemon

defaults
    log     global
    mode    http
    option  httplog
    option  dontlognull
    retries 3
    option redispatch
    timeout connect  5000
    timeout client  50000
    timeout server  50000

frontend stats-front
  bind *:8081
  mode http
  default_backend stats-back

frontend fe_k8s_6444
  bind *:6444
  mode tcp
  timeout client 1h
  log global
  option tcplog
  default_backend be_k8s_6443
  acl is_websocket hdr(Upgrade) -i WebSocket
  acl is_websocket hdr_beg(Host) -i ws

backend stats-back
  mode http
  balance roundrobin
  stats uri /haproxy/stats
  stats auth pxcstats:secret

backend be_k8s_6443
  mode tcp
  timeout queue 1h
  timeout server 1h
  timeout connect 1h
  log global
  balance roundrobin
  server rancher01 10.0.0.11:6443
  server rancher02 10.0.0.12:6443
  server rancher03 10.0.0.13:6443

[root@k8s-master01 etc]# docker rm -f HAProxy-K8S && bash /data/lb/start-haproxy.sh 
HAProxy-K8S
b3386548e1e8777167fcf9e92ee348b182a3e4a59b0e2a13384ff13bc4619a61

# Required on all three master nodes
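
A minimal sketch for rolling the updated haproxy.cfg out to the other two masters, assuming the same /data/lb layout exists there and passwordless root SSH:

for ip in 10.0.0.12 10.0.0.13; do
  scp /data/lb/etc/haproxy.cfg root@$ip:/data/lb/etc/haproxy.cfg
  ssh root@$ip "docker rm -f HAProxy-K8S && bash /data/lb/start-haproxy.sh"
done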

8. Adjust the kubectl configuration

When one master node is shut down, kubectl get node can sometimes hang because the kubeconfig points at the VIP; you can point it at the local apiserver instead:

[root@master02 ~]# vim .kube/config
server: https://10.0.0.12:6443

# Modify this on all three masters
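
Since only the IP differs per master, a sed one-liner does the job; a sketch for master02 (use 10.0.0.11 / 10.0.0.13 on the other masters):

sed -i 's#server: https://10.0.0.100:6444#server: https://10.0.0.12:6443#' ~/.kube/config
kubectl get node    # should keep responding even when another master is down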

9. Cluster health checks

# etcd cluster status
[root@k8s-master01 ~]# kubectl -n kube-system exec etcd-k8s-master01 -- etcdctl \
> --endpoints=https://10.0.0.11:2379 \
> --ca-file=/etc/kubernetes/pki/etcd/ca.crt \
> --cert-file=/etc/kubernetes/pki/etcd/server.crt \
> --key-file=/etc/kubernetes/pki/etcd/server.key cluster-health
member 52c20770c3586df3 is healthy: got healthy result from https://10.0.0.12:2379
member a4ef9fb7d070299d is healthy: got healthy result from https://10.0.0.13:2379
member cebdf10928a06f3c is healthy: got healthy result from https://10.0.0.11:2379
cluster is healthy

# kube-controller-manager (one leader active, the other two on standby)
[root@k8s-master01 ~]# kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"master02_4a856c96-ded5-4083-b19e-fbe1e4debb0a","leaseDurationSeconds":15,"acquireTime":"2020-02-08T02:03:20Z","renewTime":"2020-02-08T02:31:43Z","leaderTransitions":11}'
  creationTimestamp: "2020-02-07T13:35:54Z"
  name: kube-controller-manager
  namespace: kube-system
  resourceVersion: "5693"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager
  uid: cedd7e2d-68b9-4626-946b-9e1b458b322c

#kube-scheduler
[root@k8s-master01 ~]# kubectl get endpoints kube-scheduler --namespace=kube-system  -o yaml
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"master02_714ba13b-a72c-486e-b0cc-6fcfd688d215","leaseDurationSeconds":15,"acquireTime":"2020-02-08T02:03:21Z","renewTime":"2020-02-08T02:32:11Z","leaderTransitions":11}'
  creationTimestamp: "2020-02-07T13:35:53Z"
  name: kube-scheduler
  namespace: kube-system
  resourceVersion: "5738"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-scheduler
  uid: 645b288f-0566-41f5-8c3f-d16ad4ae4171

------------------------------------------------------------------------------

[root@k8s-master01 ~]# kubectl get pod -n kube-system
NAME                                   READY   STATUS    RESTARTS   AGE
coredns-5c98db65d4-m4c9s               1/1     Running   1          12h
coredns-5c98db65d4-pp925               1/1     Running   9          12h
etcd-k8s-master01                      1/1     Running   2          12h
etcd-master02                          1/1     Running   0          30m
etcd-master03                          1/1     Running   0          23m
kube-apiserver-k8s-master01            1/1     Running   8          12h
kube-apiserver-master02                1/1     Running   0          30m
kube-apiserver-master03                1/1     Running   0          23m
kube-controller-manager-k8s-master01   1/1     Running   12         12h
kube-controller-manager-master02       1/1     Running   0          30m
kube-controller-manager-master03       1/1     Running   0          23m
kube-flannel-ds-amd64-2mt7n            1/1     Running   0          23m
kube-flannel-ds-amd64-4lv5m            1/1     Running   0          30m
kube-flannel-ds-amd64-7q7g4            1/1     Running   2          12h
kube-proxy-7qk9g                       1/1     Running   0          23m
kube-proxy-zh9jg                       1/1     Running   0          30m
kube-proxy-zpwfz                       1/1     Running   2          12h
kube-scheduler-k8s-master01            1/1     Running   12         12h
kube-scheduler-master02                1/1     Running   0          30m
kube-scheduler-master03                1/1     Running   0          23m
[root@k8s-master01 ~]# kubectl get node
NAME           STATUS   ROLES    AGE   VERSION
k8s-master01   Ready    master   12h   v1.15.1
master02       Ready    master   30m   v1.15.1
master03       Ready    master   24m   v1.15.1

10. Join the worker nodes

After completing the initialization steps above on each worker node, simply run the worker join command printed by kubeadm init (shown below).
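
For reference, this is the worker join command from the init log on master01 (token and hash come from that log); run it as root on node01 and node02:

kubeadm join 10.0.0.100:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:f54325af8cd8f83189b47814aed0f459ef29cd6aa90313d7f777ad5cacc88ddb

Afterwards, kubectl get node on any master should list node01 and node02, turning Ready once their flannel pods start.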

Original article: https://www.cnblogs.com/hujinzhong/p/12274210.html