Hostname | IP address | Deployed software | Remarks |
---|---|---|---|
M-kube12 | 192.168.10.12 | master+etcd+docker+keepalived+haproxy | master |
M-kube13 | 192.168.10.13 | master+etcd+docker+keepalived+haproxy | master |
M-kube14 | 192.168.10.14 | master+etcd+docker+keepalived+haproxy | master |
N-kube15 | 192.168.10.15 | docker+node | node |
N-kube16 | 192.168.10.16 | docker+node | node |
VIP | 192.168.10.100 | - | VIP |
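
The original does not show a hosts file, but a minimal /etc/hosts sketch derived from the table above (the step itself is an assumption, not part of the original text) would be applied on every machine:

```bash
# Hostname resolution for all cluster machines (sketch derived from the table above)
cat >> /etc/hosts << EOF
192.168.10.12  M-kube12
192.168.10.13  M-kube13
192.168.10.14  M-kube14
192.168.10.15  N-kube15
192.168.10.16  N-kube16
EOF
```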
1.1 Environment preparation
```bash
# Adapted from a reference configuration: load the IPVS kernel modules on boot
cat << EOF > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
ipvs_modules_dir="/usr/lib/modules/\`uname -r\`/kernel/net/netfilter/ipvs"
for i in \`ls \$ipvs_modules_dir | sed -r 's#(.*)\.ko.*#\1#'\`; do
    /sbin/modinfo -F filename \$i &> /dev/null
    if [ \$? -eq 0 ]; then
        /sbin/modprobe \$i
    fi
done
EOF

chmod +x /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
```
1.2 Configure keepalived
```bash
yum install -y keepalived

# keepalived configuration on the 10.12 machine
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
    router_id LVS_k8s
}
vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.10.100:6444"
    interval 3
    timeout 9
    fall 2
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 100
    priority 100
    advert_int 1
    mcast_src_ip 192.168.10.12
    nopreempt
    authentication {
        auth_type PASS
        auth_pass fana123
    }
    unicast_peer {
        192.168.10.13
        192.168.10.14
    }
    virtual_ipaddress {
        192.168.10.100/24
    }
    track_script {
        CheckK8sMaster
    }
}
EOF

# keepalived configuration on the 10.13 machine
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
    router_id LVS_k8s
}
vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.10.100:6444"
    interval 3
    timeout 9
    fall 2
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 100
    priority 90
    advert_int 1
    mcast_src_ip 192.168.10.13
    nopreempt
    authentication {
        auth_type PASS
        auth_pass fana123
    }
    unicast_peer {
        192.168.10.12
        192.168.10.14
    }
    virtual_ipaddress {
        192.168.10.100/24
    }
    track_script {
        CheckK8sMaster
    }
}
EOF

# keepalived configuration on the 10.14 machine
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
    router_id LVS_k8s
}
vrrp_script CheckK8sMaster {
    script "curl -k https://192.168.10.100:6444"
    interval 3
    timeout 9
    fall 2
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 100
    priority 80
    advert_int 1
    mcast_src_ip 192.168.10.14
    nopreempt
    authentication {
        auth_type PASS
        auth_pass fana123
    }
    unicast_peer {
        192.168.10.12
        192.168.10.13
    }
    virtual_ipaddress {
        192.168.10.100/24
    }
    track_script {
        CheckK8sMaster
    }
}
EOF

# Start keepalived and enable it on boot
systemctl restart keepalived && systemctl enable keepalived
```
1.3 Configure haproxy
```bash
yum install -y haproxy

# haproxy configuration -- identical on the 12, 13 and 14 machines
cat << EOF > /etc/haproxy/haproxy.cfg
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

defaults
    mode                tcp
    log                 global
    retries             3
    timeout connect     10s
    timeout client      1m
    timeout server      1m

frontend kubernetes
    bind *:6444
    mode tcp
    default_backend kubernetes-master

backend kubernetes-master
    balance roundrobin
    server M-kube12 192.168.10.12:6443 check maxconn 2000
    server M-kube13 192.168.10.13:6443 check maxconn 2000
    server M-kube14 192.168.10.14:6443 check maxconn 2000
EOF

# Start haproxy and enable it on boot
systemctl enable haproxy && systemctl start haproxy
```

Alternatively, haproxy and keepalived can be deployed as containers:

```bash
# haproxy startup script
mkdir -p /data/lb
cat > /data/lb/start-haproxy.sh << "EOF"
#!/bin/bash
MasterIP1=192.168.10.12
MasterIP2=192.168.10.13
MasterIP3=192.168.10.14
MasterPort=6443

docker run -d --restart=always --name HAProxy-K8S -p 6444:6444 \
    -e MasterIP1=$MasterIP1 \
    -e MasterIP2=$MasterIP2 \
    -e MasterIP3=$MasterIP3 \
    -e MasterPort=$MasterPort \
    wise2c/haproxy-k8s
EOF

# keepalived startup script
cat > /data/lb/start-keepalived.sh << "EOF"
#!/bin/bash
VIRTUAL_IP=192.168.10.100
INTERFACE=ens33
NETMASK_BIT=24
CHECK_PORT=6444
RID=10
VRID=160
MCAST_GROUP=224.0.0.18

docker run -itd --restart=always --name=Keepalived-K8S --net=host --cap-add=NET_ADMIN \
    -e VIRTUAL_IP=$VIRTUAL_IP \
    -e INTERFACE=$INTERFACE \
    -e CHECK_PORT=$CHECK_PORT \
    -e RID=$RID \
    -e VRID=$VRID \
    -e NETMASK_BIT=$NETMASK_BIT \
    -e MCAST_GROUP=$MCAST_GROUP \
    wise2c/keepalived-k8s
EOF

# Copy the two scripts to the 13 and 14 machines as well, then start them
sh /data/lb/start-haproxy.sh && sh /data/lb/start-keepalived.sh

docker ps   # shows the container status; the configuration files can be inspected inside the containers
```
1.4 Configure etcd
1.4.1 Configure the etcd certificates on the 10.12 machine
```bash
# Download the cfssl tools
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

# Install cfssl
chmod +x cfssl*
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
export PATH=/usr/local/bin:$PATH

# Create the CA configuration (the IP addresses are those of the etcd nodes)
mkdir /root/ssl && cd /root/ssl
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes-Soulmate": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "8760h"
      }
    }
  }
}
EOF

cat > ca-csr.json <<EOF
{
  "CN": "kubernetes-Soulmate",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "shanghai",
      "L": "shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.10.12",
    "192.168.10.13",
    "192.168.10.14"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "shanghai",
      "L": "shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

# Generate the CA and the etcd certificate
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
    -profile=kubernetes-Soulmate etcd-csr.json | cfssljson -bare etcd

# Distribute the etcd certificates from 10.12 to the 10.13 and 10.14 machines
mkdir -p /etc/etcd/ssl && cp *.pem /etc/etcd/ssl/
ssh -n 192.168.10.13 "mkdir -p /etc/etcd/ssl && exit"
ssh -n 192.168.10.14 "mkdir -p /etc/etcd/ssl && exit"
scp -r /etc/etcd/ssl/*.pem 192.168.10.13:/etc/etcd/ssl/
scp -r /etc/etcd/ssl/*.pem 192.168.10.14:/etc/etcd/ssl/
```
1.4.2 Install and configure etcd on all three master nodes
```bash
yum install etcd -y
mkdir -p /var/lib/etcd
```
On the 10.12 machine:
```bash
cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \\
  --name M-kube12 \\
  --cert-file=/etc/etcd/ssl/etcd.pem \\
  --key-file=/etc/etcd/ssl/etcd-key.pem \\
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \\
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \\
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --initial-advertise-peer-urls https://192.168.10.12:2380 \\
  --listen-peer-urls https://192.168.10.12:2380 \\
  --listen-client-urls https://192.168.10.12:2379,http://127.0.0.1:2379 \\
  --advertise-client-urls https://192.168.10.12:2379 \\
  --initial-cluster-token etcd-cluster-0 \\
  --initial-cluster M-kube12=https://192.168.10.12:2380,M-kube13=https://192.168.10.13:2380,M-kube14=https://192.168.10.14:2380 \\
  --initial-cluster-state new \\
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
```
On the 10.13 machine:
```bash
cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \\
  --name M-kube13 \\
  --cert-file=/etc/etcd/ssl/etcd.pem \\
  --key-file=/etc/etcd/ssl/etcd-key.pem \\
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \\
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \\
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --initial-advertise-peer-urls https://192.168.10.13:2380 \\
  --listen-peer-urls https://192.168.10.13:2380 \\
  --listen-client-urls https://192.168.10.13:2379,http://127.0.0.1:2379 \\
  --advertise-client-urls https://192.168.10.13:2379 \\
  --initial-cluster-token etcd-cluster-0 \\
  --initial-cluster M-kube12=https://192.168.10.12:2380,M-kube13=https://192.168.10.13:2380,M-kube14=https://192.168.10.14:2380 \\
  --initial-cluster-state new \\
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
```
On the 10.14 machine:
```bash
cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \\
  --name M-kube14 \\
  --cert-file=/etc/etcd/ssl/etcd.pem \\
  --key-file=/etc/etcd/ssl/etcd-key.pem \\
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \\
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \\
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --initial-advertise-peer-urls https://192.168.10.14:2380 \\
  --listen-peer-urls https://192.168.10.14:2380 \\
  --listen-client-urls https://192.168.10.14:2379,http://127.0.0.1:2379 \\
  --advertise-client-urls https://192.168.10.14:2379 \\
  --initial-cluster-token etcd-cluster-0 \\
  --initial-cluster M-kube12=https://192.168.10.12:2380,M-kube13=https://192.168.10.13:2380,M-kube14=https://192.168.10.14:2380 \\
  --initial-cluster-state new \\
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
```
Enable etcd to start on boot:
```bash
cp /etc/systemd/system/etcd.service /usr/lib/systemd/system/
systemctl daemon-reload && systemctl start etcd && systemctl enable etcd && systemctl status etcd
```
Check the cluster health on any etcd node:
```bash
etcdctl --endpoints=https://192.168.10.12:2379,https://192.168.10.13:2379,https://192.168.10.14:2379 \
  --ca-file=/etc/etcd/ssl/ca.pem \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  cluster-health
```
If everything is healthy, the output looks like this:
```
member 1af68d968c7e3f22 is healthy: got healthy result from https://192.168.10.12:2379
member 55204c19ed228077 is healthy: got healthy result from https://192.168.10.14:2379
member e8d9a97b17f26476 is healthy: got healthy result from https://192.168.10.13:2379
cluster is healthy
```
1.5 Install Docker
Docker now comes in two editions, Docker CE and Docker EE: CE is the free community edition, EE is the commercial enterprise edition. We use the CE edition here.
Install docker on all machines.
Install docker with yum:
```bash
# 1. Install the yum repository utilities
yum install -y yum-utils device-mapper-persistent-data lvm2

# 2. Add the official docker-ce yum repository (already done earlier, skipped here)
# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# 3. Disable the docker-ce-edge repository; edge is the development branch and unstable, use the stable release
yum-config-manager --disable docker-ce-edge

# 4. Refresh the local yum cache
yum makecache fast

# 5. Install docker-ce
yum -y install docker-ce

# 6. Configure the daemon. The kubelet's cgroup driver must match docker's; the setting below is the
#    officially recommended one. Because pulling images from inside China is slow, an Aliyun registry
#    mirror is appended to the configuration.
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "registry-mirrors": ["https://uyah70su.mirror.aliyuncs.com"]
}
EOF

# 7. Start docker and enable it on boot
systemctl restart docker && systemctl enable docker && systemctl status docker
```
Run hello-world to verify the installation:
```bash
[root@localhost ~]# docker run hello-world
Unable to find image 'hello-world:latest' locally
latest: Pulling from library/hello-world
9a0669468bf7: Pull complete
Digest: sha256:0e06ef5e1945a718b02a8c319e15bae44f47039005530bc617a5d071190ed3fc
Status: Downloaded newer image for hello-world:latest

Hello from Docker!
This message shows that your installation appears to be working correctly.

To generate this message, Docker took the following steps:
 1. The Docker client contacted the Docker daemon.
 2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
 3. The Docker daemon created a new container from that image which runs the
    executable that produces the output you are currently reading.
 4. The Docker daemon streamed that output to the Docker client, which sent it
    to your terminal.

To try something more ambitious, you can run an Ubuntu container with:
 $ docker run -it ubuntu bash

Share images, automate workflows, and more with a free Docker ID:
 https://cloud.docker.com/

For more examples and ideas, visit:
 https://docs.docker.com/engine/userguide/
```
1.6 Install the kubelet and kubeadm packages
Use the DaoCloud registry mirror (this step can be skipped):
```bash
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://0d236e3f.m.daocloud.io
# docker version >= 1.12
# {"registry-mirrors": ["http://0d236e3f.m.daocloud.io"]}
# Success.
# You need to restart docker to take effect: sudo systemctl restart docker
systemctl restart docker
```
Install kubectl, kubelet, kubeadm and kubernetes-cni on all machines:
```bash
yum list kubectl kubelet kubeadm kubernetes-cni    # list the installable packages
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.tuna.tsinghua.edu.cn
 * extras: mirrors.sohu.com
 * updates: mirrors.sohu.com
# Installable packages
kubeadm.x86_64          1.14.3-0   kubernetes
kubectl.x86_64          1.14.3-0   kubernetes
kubelet.x86_64          1.14.3-0   kubernetes
kubernetes-cni.x86_64   0.7.5-0    kubernetes

# Then install kubectl kubelet kubeadm kubernetes-cni
yum install -y kubectl kubelet kubeadm kubernetes-cni

# kubelet communicates with the rest of the cluster and manages the lifecycle of the Pods and containers on its node.
# kubeadm is the Kubernetes automated deployment tool; it lowers the deployment difficulty and improves efficiency.
# kubectl is the Kubernetes cluster management CLI.
```
Modify the kubelet configuration file (optional):
```bash
vi /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
# the file may also be at the following path; this step is optional
# /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf

# Change this line
Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs"
# Add this line
Environment="KUBELET_EXTRA_ARGS=--v=2 --fail-swap-on=false --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/k8sth/pause-amd64:3.0"

# Reload the configuration
systemctl daemon-reload
```
```bash
# 1. Enable kubectl command completion
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
```
```bash
# Start the kubelet service on all hosts
systemctl enable kubelet && systemctl start kubelet
```
1.7 Initialize the cluster
kubeadm init mainly performs the following steps:
[init]: initialize with the specified version.
[preflight]: run pre-flight checks and download the required Docker images.
[kubelet-start]: generate the kubelet configuration file "/var/lib/kubelet/config.yaml"; without this file kubelet cannot start, so the kubelet started before initialization actually fails to run.
[certificates]: generate the certificates used by Kubernetes and store them in /etc/kubernetes/pki.
[kubeconfig]: generate the KubeConfig files and store them in /etc/kubernetes; the components need these files to communicate with each other.
[control-plane]: install the Master components from the YAML files in /etc/kubernetes/manifests.
[etcd]: install the etcd service from /etc/kubernetes/manifests/etcd.yaml.
[wait-control-plane]: wait for the Master components deployed by control-plane to start.
[apiclient]: check the status of the Master component services.
[uploadconfig]: upload the configuration.
[kubelet]: configure kubelet via a ConfigMap.
[patchnode]: record the CNI information on the Node by way of annotations.
[mark-control-plane]: label the current node with the Master role and taint it as unschedulable, so Pods are not scheduled onto Master nodes by default.
[bootstrap-token]: generate the token and note it down; it is needed later when adding nodes to the cluster with kubeadm join.
[addons]: install the CoreDNS and kube-proxy add-ons.
1.7.1 Add the cluster initialization configuration file on the 10.12 machine
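
The original configuration file itself is not reproduced here. As a rough sketch only: a kubeadm configuration for this topology (kubeadm v1.14, the VIP as control-plane endpoint, the external etcd cluster built in section 1.4, and the 10.244.0.0/16 pod subnet used by flannel) might look like the block below. The file path /opt/kubeadm-config.yaml, the choice of port 6444, and the exact field values are assumptions, not the author's original file:

```bash
# Illustrative sketch only -- adjust to your environment before use
cat > /opt/kubeadm-config.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.3
controlPlaneEndpoint: "192.168.10.100:6444"     # the VIP fronted by haproxy
apiServer:
  certSANs:
  - 192.168.10.100
  - 192.168.10.12
  - 192.168.10.13
  - 192.168.10.14
etcd:
  external:                                     # the external etcd cluster from section 1.4
    endpoints:
    - https://192.168.10.12:2379
    - https://192.168.10.13:2379
    - https://192.168.10.14:2379
    caFile: /etc/etcd/ssl/ca.pem
    certFile: /etc/etcd/ssl/etcd.pem
    keyFile: /etc/etcd/ssl/etcd-key.pem
networking:
  podSubnet: "10.244.0.0/16"                    # matches the flannel manifest used in 1.7.3
EOF

# With such a file, initialization on 10.12 would be run roughly like this.
# --experimental-upload-certs is the v1.14 spelling; it produces the --certificate-key
# that the control-plane join command in 1.7.4 expects.
kubeadm init --config /opt/kubeadm-config.yaml --experimental-upload-certs
```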
1.7.2 Check the current status
```bash
[root@M-kube12 kubernetes]# kubectl get node
NAME       STATUS     ROLES    AGE     VERSION
m-kube12   NotReady   master   3m40s   v1.14.3      # the STATUS still shows NotReady

[root@M-kube12 kubernetes]# kubectl -n kube-system get pod
NAME                               READY   STATUS    RESTARTS   AGE
coredns-8686dcc4fd-fmlsh           0/1     Pending   0          3m40s
coredns-8686dcc4fd-m22j7           0/1     Pending   0          3m40s
etcd-m-kube12                      1/1     Running   0          2m59s
kube-apiserver-m-kube12            1/1     Running   0          2m53s
kube-controller-manager-m-kube12   1/1     Running   0          2m33s
kube-proxy-4kg8d                   1/1     Running   0          3m40s
kube-scheduler-m-kube12            1/1     Running   0          2m45s

[root@M-kube12 kubernetes]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health":"true"}
```
1.7.3 Deploy the flannel network (run on all nodes)
```bash
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

# image version: quay.io/coreos/flannel:v0.11.0-amd64
cat kube-flannel.yml | grep image
cat kube-flannel.yml | grep 10.244

# If your network connectivity is good, this substitution is not needed
sed -i 's#quay.io/coreos/flannel:v0.11.0-amd64#willdockerhub/flannel:v0.11.0-amd64#g' kube-flannel.yml

kubectl apply -f kube-flannel.yml

# Or apply it directly from GitHub
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/a70459be0084506e4ec919aa1c114638878db11b/Documentation/kube-flannel.yml

# Wait a moment, then check that the node and all pods are Running
[root@M-fana3 kubernetes]# kubectl get node
NAME      STATUS   ROLES    AGE   VERSION
m-fana3   Ready    master   42m   v1.14.3      # the status is now Ready

[root@M-fana3 kubernetes]# kubectl -n kube-system get pod
NAME                              READY   STATUS    RESTARTS   AGE
coredns-8686dcc4fd-2z6m2          1/1     Running   0          42m
coredns-8686dcc4fd-4k7mm          1/1     Running   0          42m
etcd-m-fana3                      1/1     Running   0          41m
kube-apiserver-m-fana3            1/1     Running   0          41m
kube-controller-manager-m-fana3   1/1     Running   0          41m
kube-flannel-ds-amd64-6zrzt       1/1     Running   0          109s
kube-proxy-lc8d5                  1/1     Running   0          42m
kube-scheduler-m-fana3            1/1     Running   0          41m

# If you see something like the following instead, the image pull has probably failed
kubectl -n kube-system get pod
NAME                               READY   STATUS                  RESTARTS   AGE
coredns-8686dcc4fd-c9mw7           0/1     Pending                 0          43m
coredns-8686dcc4fd-l8fpm           0/1     Pending                 0          43m
kube-apiserver-m-kube12            1/1     Running                 0          42m
kube-controller-manager-m-kube12   1/1     Running                 0          17m
kube-flannel-ds-amd64-gcmmp        0/1     Init:ImagePullBackOff   0          11m
kube-proxy-czzk7                   1/1     Running                 0          43m
kube-scheduler-m-kube12            1/1     Running                 0          42m

# Inspect the pod with the command below; the events at the end show the pull failures.
# In that case the image can be downloaded manually, or flannel installed from a binary.
kubectl describe pod kube-flannel-ds-amd64-gcmmp --namespace=kube-system

Node-Selectors:  beta.kubernetes.io/arch=amd64
Tolerations:     :NoSchedule
                 node.kubernetes.io/disk-pressure:NoSchedule
                 node.kubernetes.io/memory-pressure:NoSchedule
                 node.kubernetes.io/network-unavailable:NoSchedule
                 node.kubernetes.io/not-ready:NoExecute
                 node.kubernetes.io/pid-pressure:NoSchedule
                 node.kubernetes.io/unreachable:NoExecute
                 node.kubernetes.io/unschedulable:NoSchedule
Events:
  Type     Reason          Age                    From                Message
  ----     ------          ----                   ----                -------
  Normal   Scheduled       11m                    default-scheduler   Successfully assigned kube-system/kube-flannel-ds-amd64-gcmmp to m-kube12
  Normal   Pulling         11m                    kubelet, m-kube12   Pulling image "willdockerhub/flannel:v0.11.0-amd64"
  Warning  FailedMount     7m27s                  kubelet, m-kube12   MountVolume.SetUp failed for volume "flannel-token-6g9n7" : couldn't propagate object cache: timed out waiting for the condition
  Warning  FailedMount     7m27s                  kubelet, m-kube12   MountVolume.SetUp failed for volume "flannel-cfg" : couldn't propagate object cache: timed out waiting for the condition
  Warning  Failed          4m21s                  kubelet, m-kube12   Failed to pull image "willdockerhub/flannel:v0.11.0-amd64": rpc error: code = Unknown desc = context canceled
  Warning  Failed          3m53s                  kubelet, m-kube12   Failed to pull image "willdockerhub/flannel:v0.11.0-amd64": rpc error: code = Unknown desc = Error response from daemon: Get https://registry-1.docker.io/v2/: net/http: request canceled (Client.Timeout exceeded while awaiting headers)
  Warning  Failed          3m16s                  kubelet, m-kube12   Failed to pull image "willdockerhub/flannel:v0.11.0-amd64": rpc error: code = Unknown desc = Error response from daemon: Get https://registry-1.docker.io/v2/: net/http: TLS handshake timeout
  Warning  Failed          3m16s (x3 over 4m21s)  kubelet, m-kube12   Error: ErrImagePull
  Normal   SandboxChanged  3m14s                  kubelet, m-kube12   Pod sandbox changed, it will be killed and re-created.
  Normal   BackOff         2m47s (x6 over 4m21s)  kubelet, m-kube12   Back-off pulling image "willdockerhub/flannel:v0.11.0-amd64"
  Warning  Failed          2m47s (x6 over 4m21s)  kubelet, m-kube12   Error: ImagePullBackOff
  Normal   Pulling         2m33s (x4 over 7m26s)  kubelet, m-kube12   Pulling image "willdockerhub/flannel:v0.11.0-amd64"
```
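
When the pull keeps failing like this, one possible workaround (a sketch, not part of the original text) is to pull the flannel image manually on the affected node and, if the unmodified manifest is used, retag it to the quay.io name it references:

```bash
# Manually pull the flannel image from the mirror repository used in the modified manifest
docker pull willdockerhub/flannel:v0.11.0-amd64

# If the original manifest (quay.io image name) is applied, retag the mirror image to match
docker tag willdockerhub/flannel:v0.11.0-amd64 quay.io/coreos/flannel:v0.11.0-amd64

# Confirm the image is now available locally
docker images | grep flannel
```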
1.7.4 Join the cluster and verify
```bash
# 1. On the master machines, run the control-plane join command
kubeadm join 192.168.10.100:6443 --token y6v90q.i6bl1bwcgg8clvh5 \
    --discovery-token-ca-cert-hash sha256:179c5689ef32be2123c9f02015ef25176d177c54322500665f1170f26368ae3d \
    --experimental-control-plane \
    --certificate-key 3044cb04c999706795b28c1d3dcd2305dcf181787d7c6537284341a985395c20

# 2. Copy the kubeconfig to the user's home directory
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# 3. On the worker nodes, run the join command
# If you forget the node join command, kubeadm token create --print-join-command prints it again
kubeadm join 192.168.10.100:6443 --token y6v90q.i6bl1bwcgg8clvh5 \
    --discovery-token-ca-cert-hash sha256:179c5689ef32be2123c9f02015ef25176d177c54322500665f1170f26368ae3d

# 4. Verify the cluster status
kubectl -n kube-system get pod -o wide   # pod status
kubectl get nodes -o wide                # node status
kubectl -n kube-system get svc           # services
NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
kube-dns   ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP,9153/TCP   16m

ipvsadm -ln                              # IPVS proxy rules
```
1.7.5 Cluster test
Deploy a simple web service to test the cluster.
```bash
cat > /opt/deployment-goweb.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: goweb
spec:
  selector:
    matchLabels:
      app: goweb
  replicas: 4
  template:
    metadata:
      labels:
        app: goweb
    spec:
      containers:
      - image: lingtony/goweb
        name: goweb
        ports:
        - containerPort: 8000
EOF

cat > /opt/svc-goweb.yaml << EOF
apiVersion: v1
kind: Service
metadata:
  name: gowebsvc
spec:
  selector:
    app: goweb
  ports:
  - name: default
    protocol: TCP
    port: 80
    targetPort: 8000
EOF

# Deploy the service
kubectl apply -f /opt/deployment-goweb.yaml
kubectl apply -f /opt/svc-goweb.yaml

# Check the pods
kubectl get pod -o wide
NAME                    READY   STATUS    RESTARTS   AGE   IP           NODE       NOMINATED NODE   READINESS GATES
goweb-6c569f884-4ln4s   1/1     Running   0          75s   10.244.1.2   n-kube15   <none>           <none>
goweb-6c569f884-jcnrs   1/1     Running   0          75s   10.244.1.3   n-kube15   <none>           <none>
goweb-6c569f884-njnzk   1/1     Running   0          75s   10.244.1.4   n-kube15   <none>           <none>
goweb-6c569f884-zxnrx   1/1     Running   0          75s   10.244.1.5   n-kube15   <none>           <none>

# Check the service
kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
gowebsvc     ClusterIP   10.105.87.199   <none>        80/TCP    84s
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP   30m

# Access test: requests to the Service are load-balanced across the pods
curl http://10.105.87.199/info
# Hostname: goweb-6c569f884-jcnrs
curl http://10.105.87.199/info
# Hostname: goweb-6c569f884-4ln4s
curl http://10.105.87.199/info
# Hostname: goweb-6c569f884-zxnrx
curl http://10.105.87.199/info
# Hostname: goweb-6c569f884-njnzk
curl http://10.105.87.199/info
# Hostname: goweb-6c569f884-jcnrs
```
1.8 Configure the dashboard
By default there is no web UI; you can install the dashboard add-on on a master machine to manage the cluster through a web interface.
The dashboard project's GitHub releases page: https://github.com/kubernetes/dashboard/releases
Image to prepare:

```
k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
```
We can first pull the image from an Aliyun mirror registry.
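
A sketch of what pulling and retagging might look like; the Aliyun mirror path below is an assumption, not given in the original:

```bash
# Pull the dashboard image from an Aliyun mirror (the exact mirror path is assumed)
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.1

# Retag it to the name the dashboard manifest expects
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.1 \
    k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
```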