Binary Installation and Deployment of a Kubernetes v1.15 Cluster

1. Cluster Architecture

[Figure: cluster architecture diagram]

2. Pre-deployment Preparation

2.1 Prepare the Virtual Machines

Five CentOS 7 virtual machines:

Hostname            Role                        IP          Specs  Services
szdc0-11.host.com   k8s proxy node 1            10.0.0.11   4C8G   bind9, nginx (L4 proxy), keepalived, supervisor
szdc0-12.host.com   k8s proxy node 2            10.0.0.12   4C8G   etcd, nginx (L4 proxy), keepalived, supervisor
szdc0-21.host.com   k8s worker node 1           10.0.0.21   4C8G   etcd, kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy, supervisor
szdc0-22.host.com   k8s worker node 2           10.0.0.22   4C8G   etcd, kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, kube-proxy, supervisor
szdc0-200.host.com  ops node (Docker registry)  10.0.0.200  4C8G   certificate services, Harbor private registry, nginx proxy for Harbor, pause image
# OS version
[root@szdc0-11 ~]# uname -r
3.10.0-693.el7.x86_64
[root@szdc0-11 ~]# uname -a
Linux szdc0-11.host.com 3.10.0-693.el7.x86_64 #1 SMP Tue Aug 22 21:09:27 UTC 2017 x86_64 x86_64 x86_64 GNU/Linux

2.2 System Tuning

# 1. Set the hostname
hostnamectl set-hostname <hostname>

# 2. Disable SELinux and the firewall
setenforce 0 && sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
systemctl stop firewalld && systemctl disable firewalld

# 3. Install the EPEL repository
yum install -y epel-release

# 4. Install required tools
yum install wget net-tools telnet tree nmap sysstat lrzsz dos2unix bind-utils -y
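These four steps must run on all five nodes. A minimal fan-out sketch over SSH, assuming root key-based login to the IPs in the table above (hostnames still have to be set per node):

for ip in 10.0.0.11 10.0.0.12 10.0.0.21 10.0.0.22 10.0.0.200; do
    ssh root@$ip "setenforce 0; sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config; \
        systemctl stop firewalld; systemctl disable firewalld; \
        yum install -y epel-release && yum install -y wget net-tools telnet tree nmap sysstat lrzsz dos2unix bind-utils"
done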

2.3 Deploy the DNS Service

2.3.1 Install bind

# Run on szdc0-11.host.com
[root@szdc0-11 ~]# yum install bind -y

2.3.2 Configure bind

1) Main configuration file

# 1. Edit the main configuration file
[root@szdc0-11 ~]# vim /etc/named.conf
....
listen-on port 53 { 10.0.0.11; };
allow-query     { any; };
forwarders      { 10.0.0.254; };	# forward recursive queries upstream; use the gateway address (add this line)
dnssec-enable no;
dnssec-validation no;
...

# 2. Validate the configuration
[root@szdc0-11 ~]# named-checkconf 

2) Add the zone definitions

# Append at the end of the file
[root@szdc0-11 ~]# vim /etc/named.rfc1912.zones
zone "host.com" IN {
        type  master;
        file  "host.com.zone";
        allow-update { 10.0.0.11; };
};

zone "od.com" IN {
        type  master;
        file  "od.com.zone";
        allow-update { 10.0.0.11; };
};

3) Create the zone data files

# 1. Host domain (host.com) data file
[root@szdc0-11 ~]# vim /var/named/host.com.zone
$ORIGIN host.com.
$TTL 600    ; 10 minutes
@       IN SOA    dns.host.com. dnsadmin.host.com. (
                2021042301 ; serial
                10800      ; refresh (3 hours)
                900        ; retry (15 minutes)
                604800     ; expire (1 week)
                86400      ; minimum (1 day)
                )
            NS   dns.host.com.
$TTL 60    ; 1 minute
dns                A    10.0.0.11
szdc0-11           A    10.0.0.11
szdc0-12           A    10.0.0.12
szdc0-21           A    10.0.0.21
szdc0-22           A    10.0.0.22
szdc0-200          A    10.0.0.200

# 2. Business domain (od.com) data file
[root@szdc0-11 ~]# vim /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600    ; 10 minutes
@           IN SOA    dns.od.com. dnsadmin.od.com. (
                2021042301 ; serial
                10800      ; refresh (3 hours)
                900        ; retry (15 minutes)
                604800     ; expire (1 week)
                86400      ; minimum (1 day)
                )
                NS   dns.od.com.
$TTL 60    ; 1 minute
dns                A    10.0.0.11

2.3.3 Start bind

[root@szdc0-11 ~]# named-checkconf
[root@szdc0-11 ~]# systemctl start named
[root@szdc0-11 ~]# systemctl enable named
[root@szdc0-11 ~]# netstat -lntp|grep named
tcp        0      0 10.0.0.11:53            0.0.0.0:*               LISTEN      15241/named         
tcp        0      0 127.0.0.1:953           0.0.0.0:*               LISTEN      15241/named         
tcp6       0      0 ::1:953                 :::*                    LISTEN      15241/named

2.3.4 Verify Name Resolution

[root@szdc0-11 ~]# dig -t A szdc0-11.host.com @10.0.0.11 +short
10.0.0.11
[root@szdc0-11 ~]# dig -t A szdc0-12.host.com @10.0.0.11 +short
10.0.0.12
[root@szdc0-11 ~]# dig -t A szdc0-21.host.com @10.0.0.11 +short
10.0.0.21
[root@szdc0-11 ~]# dig -t A szdc0-22.host.com @10.0.0.11 +short
10.0.0.22
[root@szdc0-11 ~]# dig -t A szdc0-200.host.com @10.0.0.11 +short
10.0.0.200
[root@szdc0-11 ~]# dig -t A dns.host.com @10.0.0.11 +short
10.0.0.11
[root@szdc0-11 ~]# dig -t A dns.od.com @10.0.0.11 +short
10.0.0.11
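The same checks can be run in one pass with a small loop:

[root@szdc0-11 ~]# for h in szdc0-11 szdc0-12 szdc0-21 szdc0-22 szdc0-200 dns; do
    echo -n "$h.host.com -> "; dig -t A $h.host.com @10.0.0.11 +short
done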

2.3.5 Configure DNS Clients

1) Linux hosts

# 1. Point DNS on eth0 at the new server (do this on every node)
[root@szdc0-11 ~]# sed -ri 's#^DNS.*#DNS1=10.0.0.11#g' /etc/sysconfig/network-scripts/ifcfg-eth0

# 2. Restart the network
[root@szdc0-11 ~]# systemctl restart network

# 3. Check /etc/resolv.conf
[root@szdc0-11 ~]# vim /etc/resolv.conf
# Generated by NetworkManager
search host.com	# add this line if it is missing
nameserver 10.0.0.11

2) Windows hosts

Network and Sharing Center -> adapter settings -> set the DNS server to 10.0.0.11.
If necessary, also set the interface metric of the virtual NIC to 10.

[Screenshots: Windows adapter DNS and interface metric settings]

2.3.6 Verification

On a Linux host:

[root@szdc0-200 ~]# ping szdc0-200
PING szdc0-200.host.com (10.0.0.200) 56(84) bytes of data.
64 bytes from szdc0-200.host.com (10.0.0.200): icmp_seq=1 ttl=64 time=0.010 ms
64 bytes from szdc0-200.host.com (10.0.0.200): icmp_seq=2 ttl=64 time=0.036 ms

[root@szdc0-200 ~]# ping szdc0-200.host.com
PING szdc0-200.host.com (10.0.0.200) 56(84) bytes of data.
64 bytes from szdc0-200.host.com (10.0.0.200): icmp_seq=1 ttl=64 time=0.013 ms
64 bytes from szdc0-200.host.com (10.0.0.200): icmp_seq=2 ttl=64 time=0.050 ms

On a Windows host:

C:\Windows\System32>ping szdc0-200.host.com
Pinging szdc0-200.host.com [10.0.0.200] with 32 bytes of data:
Reply from 10.0.0.200: bytes=32 time<1ms TTL=64
Reply from 10.0.0.200: bytes=32 time<1ms TTL=64
Reply from 10.0.0.200: bytes=32 time<1ms TTL=64

C:\Windows\System32>ping dns.host.com
Pinging dns.host.com [10.0.0.11] with 32 bytes of data:
Reply from 10.0.0.11: bytes=32 time<1ms TTL=64
Reply from 10.0.0.11: bytes=32 time<1ms TTL=64

C:\Windows\System32>ping dns.od.com
Pinging dns.od.com [10.0.0.11] with 32 bytes of data:
Reply from 10.0.0.11: bytes=32 time<1ms TTL=64
Reply from 10.0.0.11: bytes=32 time<1ms TTL=64

2.4 Prepare Self-signed Certificates

Run the following on the ops host:

2.4.1 Install cfssl

[root@szdc0-200 ~]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/bin/cfssl
[root@szdc0-200 ~]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/bin/cfssl-json
[root@szdc0-200 ~]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O /usr/bin/cfssl-certinfo
[root@szdc0-200 ~]# chmod +x /usr/bin/cfssl*

2.4.2 Create the CA Certificate Signing Request

[root@szdc0-200 src]# mkdir /opt/certs
[root@szdc0-200 src]# vim /opt/certs/ca-csr.json
{
    "CN": "OldboyEdu",
    "hosts": [
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ],
    "ca": {
        "expiry": "175200h"
    }
}

CSR field reference:

  • CN: Common Name; browsers use this field to decide whether a site certificate is valid, so it is usually the domain name. Very important.
  • C: Country
  • ST: State or province
  • L: Locality (city)
  • O: Organization Name (company)
  • OU: Organization Unit Name (department)

2.4.3 Generate the CA Certificate and Private Key

[root@szdc0-200 ~]# cd /opt/certs/
[root@szdc0-200 certs]# ll
total 4
-rw-r--r-- 1 root root 328 Apr 23 16:55 ca-csr.json
[root@szdc0-200 certs]# cfssl gencert -initca ca-csr.json | cfssl-json -bare ca
2021/04/23 17:00:49 [INFO] generating a new CA key and certificate from CSR
2021/04/23 17:00:49 [INFO] generate received request
2021/04/23 17:00:49 [INFO] received CSR
2021/04/23 17:00:49 [INFO] generating key: rsa-2048
2021/04/23 17:00:50 [INFO] encoded CSR
2021/04/23 17:00:50 [INFO] signed certificate with serial number 96593049137918906768333297514641004790385976570
[root@szdc0-200 certs]# ll
total 16
-rw-r--r-- 1 root root  993 Apr 23 17:00 ca.csr
-rw-r--r-- 1 root root  328 Apr 23 16:55 ca-csr.json
-rw------- 1 root root 1679 Apr 23 17:00 ca-key.pem
-rw-r--r-- 1 root root 1346 Apr 23 17:00 ca.pem
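To confirm that the subject fields and the 20-year validity (175200h) configured above made it into the CA, cfssl-certinfo can decode the certificate:

[root@szdc0-200 certs]# cfssl-certinfo -cert ca.pem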

2.5 Deploy Docker

Hosts to deploy on: szdc0-21, szdc0-22, szdc0-200

2.5.1 Install Docker

Reference script: https://github.com/docker/docker-install/blob/master/install.sh

[root@szdc0-21 ~]# curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
[root@szdc0-22 ~]# curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
[root@szdc0-200 ~]# curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun

2.5.2 Configure Docker

# szdc0-21
[root@szdc0-21 ~]# mkdir /etc/docker
[root@szdc0-21 ~]# vim /etc/docker/daemon.json
{
  "graph": "/data/docker",
  "storage-driver": "overlay2",
  "insecure-registries": ["registry.access.redhat.com","quay.io","harbor.od.com"],
  "registry-mirrors": ["https://q2gr04ke.mirror.aliyuncs.com"],
  "bip": "172.6.21.1/24",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "live-restore": true
}
[root@szdc0-21 ~]# systemctl start docker &&systemctl enable docker
[root@szdc0-21 ~]# docker version

# szdc0-22
[root@szdc0-22 ~]# mkdir /etc/docker
[root@szdc0-22 ~]# vim /etc/docker/daemon.json
{
  "graph": "/data/docker",
  "storage-driver": "overlay2",
  "insecure-registries": ["registry.access.redhat.com","quay.io","harbor.od.com"],
  "registry-mirrors": ["https://q2gr04ke.mirror.aliyuncs.com"],
  "bip": "172.6.22.1/24",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "live-restore": true
}
[root@szdc0-22 ~]# systemctl start docker &&systemctl enable docker
[root@szdc0-22 ~]# docker version

# szdc0-200
[root@szdc0-200 ~]# mkdir /etc/docker
[root@szdc0-200 ~]# vim /etc/docker/daemon.json
{
  "graph": "/data/docker",
  "storage-driver": "overlay2",
  "insecure-registries": ["registry.access.redhat.com","quay.io","harbor.od.com"],
  "registry-mirrors": ["https://q2gr04ke.mirror.aliyuncs.com"],
  "bip": "172.6.200.1/24",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "live-restore": true
}
[root@szdc0-200 ~]# systemctl start docker &&systemctl enable docker
[root@szdc0-200 ~]# docker version
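After starting, it is worth confirming that the daemon actually picked up daemon.json; on each host the three values below should read overlay2, systemd, and /data/docker:

[root@szdc0-21 ~]# docker info 2>/dev/null | grep -E 'Storage Driver|Cgroup Driver|Docker Root Dir'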

2.6 Deploy Harbor

2.6.1 Download and Extract the Package

GitHub releases: https://github.com/goharbor/harbor/releases

[root@szdc0-200 ~]# mkdir -p /opt/src/harbor
[root@szdc0-200 ~]# cd /opt/src/harbor
[root@szdc0-200 harbor]# wget https://storage.googleapis.com/harbor-releases/release-1.8.0/harbor-offline-installer-v1.8.3.tgz
[root@szdc0-200 harbor]# tar xvf harbor-offline-installer-v1.8.3.tgz -C /opt
[root@szdc0-200 harbor]# mv /opt/harbor /opt/harbor-v1.8.3
[root@szdc0-200 harbor]# ln -s /opt/harbor-v1.8.3 /opt/harbor

2.6.2 Configure Harbor

[root@szdc0-200 harbor]# vim /opt/harbor/harbor.yml
hostname: harbor.od.com
http:
  port: 180
harbor_admin_password: Harbor12345
data_volume: /data/harbor
log:
  level: info
  rotate_count: 50
  rotate_size: 200M
  location: /data/harbor/logs
  
[root@szdc0-200 harbor]# mkdir -p /data/harbor/logs

2.6.3 Install docker-compose

[root@szdc0-200 harbor]# yum install docker-compose -y
[root@szdc0-200 harbor]# rpm -qa docker-compose
docker-compose-1.18.0-4.el7.noarch

2.6.4 Install Harbor

[root@szdc0-200 harbor]# ll
total 569632
-rw-r--r-- 1 root root 583269670 Sep 16  2019 harbor.v1.8.3.tar.gz
-rw-r--r-- 1 root root      4526 Apr 23 17:22 harbor.yml
-rwxr-xr-x 1 root root      5088 Sep 16  2019 install.sh
-rw-r--r-- 1 root root     11347 Sep 16  2019 LICENSE
-rwxr-xr-x 1 root root      1654 Sep 16  2019 prepare
[root@szdc0-200 harbor]# sh -x install.sh 
[root@szdc0-200 harbor]# docker-compose ps
      Name                     Command               State                 Ports               
-----------------------------------------------------------------------------------------------
harbor-core         /harbor/start.sh                 Up                                        
harbor-db           /entrypoint.sh postgres          Up      5432/tcp                          
harbor-jobservice   /harbor/start.sh                 Up                                        
harbor-log          /bin/sh -c /usr/local/bin/ ...   Up      127.0.0.1:1514->10514/tcp         
harbor-portal       nginx -g daemon off;             Up      80/tcp                            
nginx               nginx -g daemon off;             Up      0.0.0.0:180->80/tcp,:::180->80/tcp
redis               docker-entrypoint.sh redis ...   Up      6379/tcp                          
registry            /entrypoint.sh /etc/regist ...   Up      5000/tcp                          
registryctl         /harbor/start.sh                 Up

2.6.5 Add Internal DNS Resolution for Harbor

# 1. Edit the od.com zone file; remember to bump the serial number
[root@szdc0-11 ~]# vim /var/named/od.com.zone
harbor             A    10.0.0.200

# 2. Restart named
[root@szdc0-11 ~]# systemctl restart named

# 3. Test resolution
[root@szdc0-11 ~]# dig -t A harbor.od.com +short
10.0.0.200

2.6.6 Install nginx as a Proxy for Harbor

[root@szdc0-200 ~]# yum install nginx -y
[root@szdc0-200 ~]# rpm -qa nginx
nginx-1.16.1-3.el7.x86_64
[root@szdc0-200 ~]# vim /etc/nginx/conf.d/harbor.od.com.conf
server {
    listen       80;
    server_name  harbor.od.com;

    client_max_body_size 1000m;

    location / {
        proxy_pass http://127.0.0.1:180;
    }
}
[root@szdc0-200 ~]# nginx -t
[root@szdc0-200 ~]# systemctl start nginx && systemctl enable nginx
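Before moving to a browser, the proxy chain (nginx on port 80 -> Harbor on port 180) can be checked from the shell; an HTTP 200 here confirms it works:

[root@szdc0-200 ~]# curl -s -o /dev/null -w '%{http_code}\n' http://harbor.od.com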

2.6.7 Browser Test

URL: http://harbor.od.com/

Credentials: admin / Harbor12345

[Screenshot: Harbor login page]

2.6.8 Test Harbor

1) Create a public project in Harbor with the access level set to public

[Screenshot: creating the public project]

2) Pull an image, tag it, and push it to the registry

[root@szdc0-200 ~]# docker pull nginx:1.7.9
[root@szdc0-200 ~]# docker images|grep 1.7.9
nginx                           1.7.9                      84581e99d807   6 years ago     91.7MB
[root@szdc0-200 ~]# docker tag 84581e99d807 harbor.od.com/public/nginx:v1.7.9
[root@szdc0-200 ~]# docker login harbor.od.com
Username: admin
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded
[root@szdc0-200 ~]# docker push harbor.od.com/public/nginx:v1.7.9
The push refers to repository [harbor.od.com/public/nginx]
5f70bf18a086: Pushed 
4b26ab29a475: Pushed 
ccb1d68e3fb7: Pushed 
e387107e2065: Pushed 
63bf84221cce: Pushed 
e02dce553481: Pushed 
dea2e4984e29: Pushed 
v1.7.9: digest: sha256:b1f5935eb2e9e2ae89c0b3e2e148c19068d91ca502e857052f14db230443e4c2 size: 3012

[Screenshot: nginx image listed in the public project]

3. Deploy the etcd Cluster

3.1 Cluster Plan

Hostname           Role           IP
szdc0-12.host.com  etcd leader    10.0.0.12
szdc0-21.host.com  etcd follower  10.0.0.21
szdc0-22.host.com  etcd follower  10.0.0.22

Note: this section uses szdc0-12.host.com as the example; the other two hosts are deployed the same way.

3.2 Installation

1) Create the CA signing config file

[root@szdc0-200 harbor]# cd /opt/certs/
[root@szdc0-200 certs]# vim /opt/certs/ca-config.json
{
    "signing": {
        "default": {
            "expiry": "175200h"
        },
        "profiles": {
            "server": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth"
                ]
            },
            "client": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth"
                ]
            },
            "peer": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}

Certificate profile notes:

  • client certificate: used by clients so the server can authenticate them, e.g. etcdctl, etcd proxy, fleetctl, the docker client
  • server certificate: used by servers so clients can verify the server's identity, e.g. the docker daemon, kube-apiserver
  • peer certificate: dual-purpose certificate used for mutual authentication between etcd cluster members

2) Create the JSON config for the certificate signing request (CSR)

[root@szdc0-200 certs]# vi etcd-peer-csr.json
{
    "CN": "k8s-etcd",
    "hosts": [
        "10.0.0.11",
        "10.0.0.12",
        "10.0.0.21",
        "10.0.0.22"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}

3) Generate the etcd certificate and private key

[root@szdc0-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer etcd-peer-csr.json|cfssl-json -bare etcd-peer
2021/04/24 10:51:44 [INFO] generate received request
2021/04/24 10:51:44 [INFO] received CSR
2021/04/24 10:51:44 [INFO] generating key: rsa-2048
2021/04/24 10:51:45 [INFO] encoded CSR
2021/04/24 10:51:45 [INFO] signed certificate with serial number 432984615086878011635103337106412816218660665284
2021/04/24 10:51:45 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@szdc0-200 certs]# ll etcd*
-rw-r--r-- 1 root root 1062 Apr 24 10:51 etcd-peer.csr
-rw-r--r-- 1 root root  363 Apr 24 10:50 etcd-peer-csr.json
-rw------- 1 root root 1679 Apr 24 10:51 etcd-peer-key.pem
-rw-r--r-- 1 root root 1428 Apr 24 10:51 etcd-peer.pem

4) Create the etcd user

[root@szdc0-12 ~]# useradd -s /sbin/nologin  -M etcd

5) Download the software

etcd download page: https://github.com/etcd-io/etcd/tags

[root@szdc0-12 certs]# cd /opt/src/
[root@szdc0-12 src]# wget https://github.com/etcd-io/etcd/releases/download/v3.1.20/etcd-v3.1.20-linux-amd64.tar.gz
[root@szdc0-12 src]# tar xf etcd-v3.1.20-linux-amd64.tar.gz -C /opt
[root@szdc0-12 src]# cd /opt/
[root@szdc0-12 opt]# mv etcd-v3.1.20-linux-amd64/ etcd-v3.1.20
[root@szdc0-12 opt]# ln -s /opt/etcd-v3.1.20/ /opt/etcd
[root@szdc0-12 opt]# ls -l /opt/
total 0
lrwxrwxrwx 1 root   root   18 Apr 24 11:11 etcd -> /opt/etcd-v3.1.20/
drwxr-xr-x 3 478493 89939 123 Oct 11  2018 etcd-v3.1.20
drwxr-xr-x 2 root   root   45 Apr 24 11:09 src

6) Create directories and copy the certificate and private key

[root@szdc0-12 opt]# mkdir -p /opt/etcd/certs /data/etcd /data/logs/etcd-server

# Copy ca.pem, etcd-peer-key.pem, and etcd-peer.pem generated on the ops host into /opt/etcd/certs; the private key must keep mode 600
[root@szdc0-12 opt]# cd /opt/etcd/certs
[root@szdc0-12 certs]# scp  szdc0-200:/opt/certs/ca.pem /opt/etcd/certs/
[root@szdc0-12 certs]# scp  szdc0-200:/opt/certs/etcd-peer.pem /opt/etcd/certs/
[root@szdc0-12 certs]# scp  szdc0-200:/opt/certs/etcd-peer-key.pem /opt/etcd/certs/

# Fix ownership
[root@szdc0-12 certs]# chown -R etcd.etcd /opt/etcd/certs /data/etcd /data/logs/etcd-server
[root@szdc0-12 certs]# ll
total 12
-rw-r--r-- 1 etcd etcd 1346 Apr 24 11:13 ca.pem
-rw------- 1 etcd etcd 1679 Apr 24 11:13 etcd-peer-key.pem
-rw-r--r-- 1 etcd etcd 1428 Apr 24 11:13 etcd-peer.pem

7) Create the etcd startup script

[root@szdc0-12 certs]# vim /opt/etcd/etcd-server-startup.sh
#!/bin/sh
./etcd --name etcd-server-0-12 \
       --data-dir /data/etcd/etcd-server \
       --listen-peer-urls https://10.0.0.12:2380 \
       --listen-client-urls https://10.0.0.12:2379,http://127.0.0.1:2379 \
       --quota-backend-bytes 8000000000 \
       --initial-advertise-peer-urls https://10.0.0.12:2380 \
       --advertise-client-urls https://10.0.0.12:2379,http://127.0.0.1:2379 \
       --initial-cluster  etcd-server-0-12=https://10.0.0.12:2380,etcd-server-0-21=https://10.0.0.21:2380,etcd-server-0-22=https://10.0.0.22:2380 \
       --ca-file ./certs/ca.pem \
       --cert-file ./certs/etcd-peer.pem \
       --key-file ./certs/etcd-peer-key.pem \
       --client-cert-auth \
       --trusted-ca-file ./certs/ca.pem \
       --peer-ca-file ./certs/ca.pem \
       --peer-cert-file ./certs/etcd-peer.pem \
       --peer-key-file ./certs/etcd-peer-key.pem \
       --peer-client-cert-auth \
       --peer-trusted-ca-file ./certs/ca.pem \
       --log-output stdout
       
# Make the script executable
[root@szdc0-12 certs]# chmod +x /opt/etcd/etcd-server-startup.sh

Note: the startup script differs slightly on each etcd host; adjust it when deploying the other nodes.

8) Install supervisor

[root@szdc0-12 certs]# yum install supervisor -y
[root@szdc0-12 certs]# systemctl start supervisord.service
[root@szdc0-12 certs]# systemctl enable supervisord.service

9) Create the supervisor program config for etcd-server

[root@szdc0-12 certs]# vim /etc/supervisord.d/etcd-server.ini
[program:etcd-server-0-12]
command=/opt/etcd/etcd-server-startup.sh                        ; the program (relative uses PATH, can take args)
numprocs=1                                                      ; number of processes copies to start (def 1)
directory=/opt/etcd                                             ; directory to cwd to before exec (def no cwd)
autostart=true                                                  ; start at supervisord start (default: true)
autorestart=true                                                ; restart at unexpected quit (default: true)
startsecs=30                                                    ; number of secs prog must stay running (def. 1)
startretries=3                                                  ; max # of serial start failures (default 3)
exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
user=etcd                                                       ; setuid to this UNIX account to run the program
redirect_stderr=true                                            ; redirect proc stderr to stdout (default false)
killasgroup=true                                                ; kill all process in a group
stopasgroup=true                                                ; stop all process in a group
stdout_logfile=/data/logs/etcd-server/etcd.stdout.log           ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                     ; emit events on stdout writes (default false)

Note: the supervisor config also differs slightly per host (only the program name changes); adjust it on the other nodes — a templating sketch follows below.
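Since only the program suffix differs, the other nodes' configs can be derived from this one. A minimal sketch (the output path is illustrative):

# Hypothetical: generate the szdc0-21 variant from the szdc0-12 config
[root@szdc0-12 certs]# sed 's/etcd-server-0-12/etcd-server-0-21/' /etc/supervisord.d/etcd-server.ini > /tmp/etcd-server.ini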

10) Start etcd and verify

[root@szdc0-12 certs]# supervisorctl update
etcd-server-0-12: added process group
[root@szdc0-12 certs]# supervisorctl status
etcd-server-0-12                 RUNNING   pid 1794, uptime 0:00:35
[root@szdc0-12 certs]# netstat -lntup|grep etcd
tcp        0      0 10.0.0.12:2379          0.0.0.0:*               LISTEN      1795/./etcd         
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      1795/./etcd         
tcp        0      0 10.0.0.12:2380          0.0.0.0:*               LISTEN      1795/./etcd 

11) Deploy, start, and verify etcd on the remaining planned hosts

On szdc0-21:

[root@szdc0-21 ~]# useradd -s /sbin/nologin -M etcd
[root@szdc0-21 ~]# mkdir /opt/src
[root@szdc0-21 ~]# cd /opt/src
[root@szdc0-21 src]# wget https://github.com/etcd-io/etcd/releases/download/v3.1.20/etcd-v3.1.20-linux-amd64.tar.gz
[root@szdc0-21 src]# tar xf etcd-v3.1.20-linux-amd64.tar.gz -C /opt
[root@szdc0-21 src]# cd /opt/
[root@szdc0-21 opt]# mv etcd-v3.1.20-linux-amd64/ etcd-v3.1.20
[root@szdc0-21 opt]# ln -s /opt/etcd-v3.1.20/ /opt/etcd
[root@szdc0-21 opt]# mkdir -p /opt/etcd/certs /data/etcd /data/logs/etcd-server
[root@szdc0-21 opt]# cd /opt/etcd/certs
[root@szdc0-21 certs]# scp szdc0-200:/opt/certs/ca.pem /opt/etcd/certs/
[root@szdc0-21 certs]# scp szdc0-200:/opt/certs/etcd-peer-key.pem /opt/etcd/certs/
[root@szdc0-21 certs]# scp szdc0-200:/opt/certs/etcd-peer.pem /opt/etcd/certs/
[root@szdc0-21 certs]# chown -R etcd.etcd /opt/etcd/certs /data/etcd /data/logs/etcd-server
[root@szdc0-21 certs]# vim /opt/etcd/etcd-server-startup.sh
#!/bin/sh
./etcd --name etcd-server-0-21 \
       --data-dir /data/etcd/etcd-server \
       --listen-peer-urls https://10.0.0.21:2380 \
       --listen-client-urls https://10.0.0.21:2379,http://127.0.0.1:2379 \
       --quota-backend-bytes 8000000000 \
       --initial-advertise-peer-urls https://10.0.0.21:2380 \
       --advertise-client-urls https://10.0.0.21:2379,http://127.0.0.1:2379 \
       --initial-cluster  etcd-server-0-12=https://10.0.0.12:2380,etcd-server-0-21=https://10.0.0.21:2380,etcd-server-0-22=https://10.0.0.22:2380 \
       --ca-file ./certs/ca.pem \
       --cert-file ./certs/etcd-peer.pem \
       --key-file ./certs/etcd-peer-key.pem \
       --client-cert-auth \
       --trusted-ca-file ./certs/ca.pem \
       --peer-ca-file ./certs/ca.pem \
       --peer-cert-file ./certs/etcd-peer.pem \
       --peer-key-file ./certs/etcd-peer-key.pem \
       --peer-client-cert-auth \
       --peer-trusted-ca-file ./certs/ca.pem \
       --log-output stdout
[root@szdc0-21 certs]# chmod +x /opt/etcd/etcd-server-startup.sh
[root@szdc0-21 certs]# yum install supervisor -y
[root@szdc0-21 certs]# systemctl start supervisord.service
[root@szdc0-21 certs]# systemctl enable supervisord.service
[root@szdc0-21 certs]# vim /etc/supervisord.d/etcd-server.ini
[root@szdc0-21 certs]# cat /etc/supervisord.d/etcd-server.ini
[program:etcd-server-0-21]
command=/opt/etcd/etcd-server-startup.sh                        ; the program (relative uses PATH, can take args)
numprocs=1                                                      ; number of processes copies to start (def 1)
directory=/opt/etcd                                             ; directory to cwd to before exec (def no cwd)
autostart=true                                                  ; start at supervisord start (default: true)
autorestart=true                                                ; restart at unexpected quit (default: true)
startsecs=30                                                    ; number of secs prog must stay running (def. 1)
startretries=3                                                  ; max # of serial start failures (default 3)
exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
user=etcd                                                       ; setuid to this UNIX account to run the program
redirect_stderr=true                                            ; redirect proc stderr to stdout (default false)
killasgroup=true                                                ; kill all process in a group
stopasgroup=true                                                ; stop all process in a group
stdout_logfile=/data/logs/etcd-server/etcd.stdout.log           ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                     ; emit events on stdout writes (default false)
[root@szdc0-21 certs]# supervisorctl update
[root@szdc0-21 certs]# supervisorctl status
[root@szdc0-21 certs]# netstat -lntup|grep etcd

On szdc0-22:

[root@szdc0-22 ~]# useradd -s /sbin/nologin -M etcd
[root@szdc0-22 ~]# mkdir /opt/src
[root@szdc0-22 ~]# cd /opt/src
[root@szdc0-22 src]# wget https://github.com/etcd-io/etcd/releases/download/v3.1.20/etcd-v3.1.20-linux-amd64.tar.gz
[root@szdc0-22 src]# tar xf etcd-v3.1.20-linux-amd64.tar.gz -C /opt
[root@szdc0-22 src]# cd /opt/
[root@szdc0-22 opt]# mv etcd-v3.1.20-linux-amd64/ etcd-v3.1.20
[root@szdc0-22 opt]# ln -s /opt/etcd-v3.1.20/ /opt/etcd
[root@szdc0-22 opt]# mkdir -p /opt/etcd/certs /data/etcd /data/logs/etcd-server
[root@szdc0-22 opt]# cd /opt/etcd/certs
[root@szdc0-22 certs]# scp szdc0-200:/opt/certs/ca.pem /opt/etcd/certs/
[root@szdc0-22 certs]# scp szdc0-200:/opt/certs/etcd-peer-key.pem /opt/etcd/certs/
[root@szdc0-22 certs]# scp szdc0-200:/opt/certs/etcd-peer.pem /opt/etcd/certs/
[root@szdc0-22 certs]# chown -R etcd.etcd /opt/etcd/certs /data/etcd /data/logs/etcd-server
[root@szdc0-22 certs]# vim /opt/etcd/etcd-server-startup.sh
#!/bin/sh
./etcd --name etcd-server-0-22 \
       --data-dir /data/etcd/etcd-server \
       --listen-peer-urls https://10.0.0.22:2380 \
       --listen-client-urls https://10.0.0.22:2379,http://127.0.0.1:2379 \
       --quota-backend-bytes 8000000000 \
       --initial-advertise-peer-urls https://10.0.0.22:2380 \
       --advertise-client-urls https://10.0.0.22:2379,http://127.0.0.1:2379 \
       --initial-cluster  etcd-server-0-12=https://10.0.0.12:2380,etcd-server-0-21=https://10.0.0.21:2380,etcd-server-0-22=https://10.0.0.22:2380 \
       --ca-file ./certs/ca.pem \
       --cert-file ./certs/etcd-peer.pem \
       --key-file ./certs/etcd-peer-key.pem \
       --client-cert-auth \
       --trusted-ca-file ./certs/ca.pem \
       --peer-ca-file ./certs/ca.pem \
       --peer-cert-file ./certs/etcd-peer.pem \
       --peer-key-file ./certs/etcd-peer-key.pem \
       --peer-client-cert-auth \
       --peer-trusted-ca-file ./certs/ca.pem \
       --log-output stdout
[root@szdc0-22 certs]# chmod +x /opt/etcd/etcd-server-startup.sh
[root@szdc0-22 certs]# yum install supervisor -y
[root@szdc0-22 certs]# systemctl start supervisord.service
[root@szdc0-22 certs]# systemctl enable supervisord.service
[root@szdc0-22 certs]# vim /etc/supervisord.d/etcd-server.ini
[root@szdc0-22 certs]# cat /etc/supervisord.d/etcd-server.ini
[program:etcd-server-0-22]
command=/opt/etcd/etcd-server-startup.sh                        ; the program (relative uses PATH, can take args)
numprocs=1                                                      ; number of processes copies to start (def 1)
directory=/opt/etcd                                             ; directory to cwd to before exec (def no cwd)
autostart=true                                                  ; start at supervisord start (default: true)
autorestart=true                                                ; restart at unexpected quit (default: true)
startsecs=30                                                    ; number of secs prog must stay running (def. 1)
startretries=3                                                  ; max # of serial start failures (default 3)
exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
user=etcd                                                       ; setuid to this UNIX account to run the program
redirect_stderr=true                                            ; redirect proc stderr to stdout (default false)
killasgroup=true                                                ; kill all process in a group
stopasgroup=true                                                ; stop all process in a group
stdout_logfile=/data/logs/etcd-server/etcd.stdout.log           ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                     ; emit events on stdout writes (default false)
[root@szdc0-22 certs]# supervisorctl update
[root@szdc0-22 certs]# supervisorctl status
[root@szdc0-22 certs]# netstat -lntup|grep etcd

12) Check cluster status

[root@szdc0-12 etcd]# pwd
/opt/etcd
[root@szdc0-12 etcd]# ./etcdctl cluster-health
member 6cbdd801d2c800d9 is healthy: got healthy result from http://127.0.0.1:2379
member 74538ef5dc383e39 is healthy: got healthy result from http://127.0.0.1:2379
member f7a9c20602b8532e is healthy: got healthy result from http://127.0.0.1:2379
cluster is healthy
[root@szdc0-12 etcd]# ./etcdctl member list
6cbdd801d2c800d9: name=etcd-server-0-21 peerURLs=https://10.0.0.21:2380 clientURLs=http://127.0.0.1:2379,https://10.0.0.21:2379 isLeader=false
74538ef5dc383e39: name=etcd-server-0-22 peerURLs=https://10.0.0.22:2380 clientURLs=http://127.0.0.1:2379,https://10.0.0.22:2379 isLeader=false
f7a9c20602b8532e: name=etcd-server-0-12 peerURLs=https://10.0.0.12:2380 clientURLs=http://127.0.0.1:2379,https://10.0.0.12:2379 isLeader=true
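cluster-health above goes through the plain-HTTP listener on 127.0.0.1. To confirm the TLS client endpoints work as well, etcdctl (v2 API in etcd v3.1) accepts the peer certificate; a quick check, assuming the cert paths used earlier:

[root@szdc0-12 etcd]# ./etcdctl --ca-file ./certs/ca.pem --cert-file ./certs/etcd-peer.pem \
       --key-file ./certs/etcd-peer-key.pem --endpoints https://10.0.0.12:2379 cluster-health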

4. Deploy kube-apiserver

4.1 Cluster Plan

Hostname           Role              IP
szdc0-21.host.com  kube-apiserver    10.0.0.21
szdc0-22.host.com  kube-apiserver    10.0.0.22
szdc0-11.host.com  L4 load balancer  10.0.0.11
szdc0-12.host.com  L4 load balancer  10.0.0.12

Note: 10.0.0.11 and 10.0.0.12 run nginx as a layer-4 load balancer, with keepalived holding a VIP (10.0.0.10) in front of the two kube-apiservers for high availability.

This section uses szdc0-21.host.com as the example; the other worker node is deployed the same way.

4.2 Download the Software

Official Kubernetes GitHub repository: https://github.com/kubernetes/kubernetes

[root@szdc0-21 ~]# cd /opt/src/
[root@szdc0-21 ~]# wget http://down.sunrisenan.com/k8s/kubernetes/kubernetes-server-linux-amd64-v1.15.2.tar.gz
[root@szdc0-21 src]# tar xf kubernetes-server-linux-amd64-v1.15.2.tar.gz -C /opt/
[root@szdc0-21 src]# cd /opt/
[root@szdc0-21 opt]# mv kubernetes/ kubernetes-v1.15.2
[root@szdc0-21 opt]# ln -s /opt/kubernetes-v1.15.2/ /opt/kubernetes

# Remove the source tarball
[root@szdc0-21 opt]# cd kubernetes
[root@szdc0-21 kubernetes]# rm -f kubernetes-src.tar.gz 
[root@szdc0-21 kubernetes]# ll
total 1180
drwxr-xr-x 2 root root       6 Aug  5  2019 addons
-rw-r--r-- 1 root root 1205293 Aug  5  2019 LICENSES
drwxr-xr-x 3 root root      17 Aug  5  2019 server

# Remove the bundled Docker image tarballs
[root@szdc0-21 kubernetes]# cd server/bin
[root@szdc0-21 bin]# rm -f *.tar
[root@szdc0-21 bin]# rm -f *_tag
[root@szdc0-21 bin]# ll
total 884636
-rwxr-xr-x 1 root root  43534816 Aug  5  2019 apiextensions-apiserver
-rwxr-xr-x 1 root root 100548640 Aug  5  2019 cloud-controller-manager
-rwxr-xr-x 1 root root 200648416 Aug  5  2019 hyperkube
-rwxr-xr-x 1 root root  40182208 Aug  5  2019 kubeadm
-rwxr-xr-x 1 root root 164501920 Aug  5  2019 kube-apiserver
-rwxr-xr-x 1 root root 116397088 Aug  5  2019 kube-controller-manager
-rwxr-xr-x 1 root root  42985504 Aug  5  2019 kubectl
-rwxr-xr-x 1 root root 119616640 Aug  5  2019 kubelet
-rwxr-xr-x 1 root root  36987488 Aug  5  2019 kube-proxy
-rwxr-xr-x 1 root root  38786144 Aug  5  2019 kube-scheduler
-rwxr-xr-x 1 root root   1648224 Aug  5  2019 mounter

4.3 Issue the Client Certificate

1) Create the JSON config for the certificate signing request (CSR)

[root@szdc0-200 ~]# vim /opt/certs/client-csr.json
{
    "CN": "k8s-node",
    "hosts": [
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}

2) Generate the client certificate and private key

[root@szdc0-200 ~]# cd /opt/certs/
[root@szdc0-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client-csr.json |cfssl-json -bare client
2021/04/24 14:29:09 [INFO] generate received request
2021/04/24 14:29:09 [INFO] received CSR
2021/04/24 14:29:09 [INFO] generating key: rsa-2048
2021/04/24 14:29:09 [INFO] encoded CSR
2021/04/24 14:29:09 [INFO] signed certificate with serial number 469093553578142587796569278755462370202199945657
2021/04/24 14:29:09 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@szdc0-200 certs]# ll client*
-rw-r--r-- 1 root root  993 Apr 24 14:29 client.csr
-rw-r--r-- 1 root root  280 Apr 24 14:28 client-csr.json
-rw------- 1 root root 1679 Apr 24 14:29 client-key.pem
-rw-r--r-- 1 root root 1363 Apr 24 14:29 client.pem

4.4 Issue the kube-apiserver Certificate

1) Create the JSON config for the certificate signing request (CSR)

[root@szdc0-200 certs]# vim /opt/certs/apiserver-csr.json
{
    "CN": "k8s-apiserver",
    "hosts": [
        "127.0.0.1",
        "10.96.0.1",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local",
        "10.0.0.10",
        "10.0.0.21",
        "10.0.0.22",
        "10.0.0.200"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
} 

2) Generate the kube-apiserver certificate and private key

[root@szdc0-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server apiserver-csr.json |cfssl-json -bare apiserver
2021/04/24 14:32:58 [INFO] generate received request
2021/04/24 14:32:58 [INFO] received CSR
2021/04/24 14:32:58 [INFO] generating key: rsa-2048
2021/04/24 14:32:59 [INFO] encoded CSR
2021/04/24 14:32:59 [INFO] signed certificate with serial number 360987419813662267261841338092576178422049939358
2021/04/24 14:32:59 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@szdc0-200 certs]# ll apiserver*
-rw-r--r-- 1 root root 1249 Apr 24 14:32 apiserver.csr
-rw-r--r-- 1 root root  566 Apr 24 14:32 apiserver-csr.json
-rw------- 1 root root 1679 Apr 24 14:32 apiserver-key.pem
-rw-r--r-- 1 root root 1598 Apr 24 14:32 apiserver.pem

4.5 Worker Node Configuration

1) Copy the certificates to each worker node and create the configuration

[root@szdc0-21 bin]# pwd
/opt/kubernetes/server/bin
[root@szdc0-21 bin]# mkdir cert
[root@szdc0-21 bin]# scp szdc0-200:/opt/certs/apiserver-key.pem /opt/kubernetes/server/bin/cert/
[root@szdc0-21 bin]# scp szdc0-200:/opt/certs/apiserver.pem /opt/kubernetes/server/bin/cert/
[root@szdc0-21 bin]# scp szdc0-200:/opt/certs/ca-key.pem /opt/kubernetes/server/bin/cert/
[root@szdc0-21 bin]# scp szdc0-200:/opt/certs/ca.pem /opt/kubernetes/server/bin/cert/
[root@szdc0-21 bin]# scp szdc0-200:/opt/certs/client-key.pem /opt/kubernetes/server/bin/cert/
[root@szdc0-21 bin]# scp szdc0-200:/opt/certs/client.pem /opt/kubernetes/server/bin/cert/

2) Create the audit policy file

[root@szdc0-21 bin]# mkdir conf
[root@szdc0-21 bin]# vi conf/audit.yaml
apiVersion: audit.k8s.io/v1beta1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level
  - level: RequestResponse
    resources:
    - group: ""
      # Resource "pods" doesn't match requests to any subresource of pods,
      # which is consistent with the RBAC policy.
      resources: ["pods"]
  # Log "pods/log", "pods/status" at Metadata level
  - level: Metadata
    resources:
    - group: ""
      resources: ["pods/log", "pods/status"]

  # Don't log requests to a configmap called "controller-leader"
  - level: None
    resources:
    - group: ""
      resources: ["configmaps"]
      resourceNames: ["controller-leader"]

  # Don't log watch requests by the "system:kube-proxy" on endpoints or services
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
    - group: "" # core API group
      resources: ["endpoints", "services"]

  # Don't log authenticated requests to certain non-resource URL paths.
  - level: None
    userGroups: ["system:authenticated"]
    nonResourceURLs:
    - "/api*" # Wildcard matching.
    - "/version"

  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
    - group: "" # core API group
      resources: ["configmaps"]
    # This rule only applies to resources in the "kube-system" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["kube-system"]

  # Log configmap and secret changes in all other namespaces at the Metadata level.
  - level: Metadata
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]

  # Log all other resources in core and extensions at the Request level.
  - level: Request
    resources:
    - group: "" # core API group
    - group: "extensions" # Version of group should NOT be included.

  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
    # Long-running requests like watches that fall under this rule will not
    # generate an audit event in RequestReceived.
    omitStages:
      - "RequestReceived"

3) Create the kube-apiserver startup script

[root@szdc0-21 bin]# vim /opt/kubernetes/server/bin/kube-apiserver.sh
#!/bin/bash
./kube-apiserver \
  --apiserver-count 2 \
  --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
  --audit-policy-file ./conf/audit.yaml \
  --authorization-mode RBAC \
  --client-ca-file ./cert/ca.pem \
  --requestheader-client-ca-file ./cert/ca.pem \
  --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
  --etcd-cafile ./cert/ca.pem \
  --etcd-certfile ./cert/client.pem \
  --etcd-keyfile ./cert/client-key.pem \
  --etcd-servers https://10.0.0.12:2379,https://10.0.0.21:2379,https://10.0.0.22:2379 \
  --service-account-key-file ./cert/ca-key.pem \
  --service-cluster-ip-range 10.96.0.0/22 \
  --service-node-port-range 3000-29999 \
  --target-ram-mb=1024 \
  --kubelet-client-certificate ./cert/client.pem \
  --kubelet-client-key ./cert/client-key.pem \
  --log-dir  /data/logs/kubernetes/kube-apiserver \
  --tls-cert-file ./cert/apiserver.pem \
  --tls-private-key-file ./cert/apiserver-key.pem \
  --v 2
  
[root@szdc0-21 bin]# chmod +x /opt/kubernetes/server/bin/kube-apiserver.sh
[root@szdc0-21 bin]# mkdir -p /data/logs/kubernetes/kube-apiserver

4) Create the supervisor config

[root@szdc0-21 bin]# vi /etc/supervisord.d/kube-apiserver.ini
[program:kube-apiserver-0-21]
command=/opt/kubernetes/server/bin/kube-apiserver.sh            ; the program (relative uses PATH, can take args)
numprocs=1                                                      ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                            ; directory to cwd to before exec (def no cwd)
autostart=true                                                  ; start at supervisord start (default: true)
autorestart=true                                                ; restart at unexpected quit (default: true)
startsecs=30                                                    ; number of secs prog must stay running (def. 1)
startretries=3                                                  ; max # of serial start failures (default 3)
exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                       ; setuid to this UNIX account to run the program
redirect_stderr=true                                            ; redirect proc stderr to stdout (default false)
killasgroup=true                                                ; kill all process in a group
stopasgroup=true                                                ; stop all process in a group
stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log        ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                     ; emit events on stdout writes (default false)

5) Start the service and verify

[root@szdc0-21 bin]# supervisorctl update
kube-apiserver-0-21: added process group
[root@szdc0-21 bin]# supervisorctl status
etcd-server-0-21                 RUNNING   pid 1305, uptime 0:49:58
kube-apiserver-0-21              RUNNING   pid 1462, uptime 0:01:04
[root@szdc0-21 bin]# netstat -lntup|grep kube-apiser
tcp        0      0 127.0.0.1:8080          0.0.0.0:*               LISTEN      1463/./kube-apiserv 
tcp6       0      0 :::6443                 :::*                    LISTEN      1463/./kube-apiserv
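Beyond the listening sockets, the health endpoint can be probed directly; this is a sanity check, not part of the original procedure. The insecure port answers locally, and the TLS port can be exercised with the client certificate pair copied earlier (expected response: ok):

[root@szdc0-21 bin]# curl -s http://127.0.0.1:8080/healthz
[root@szdc0-21 bin]# curl -s --cacert cert/ca.pem --cert cert/client.pem --key cert/client-key.pem https://10.0.0.21:6443/healthz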

6) Deploy, start, and verify on the remaining planned hosts

[root@szdc0-22 src]# tar xf kubernetes-server-linux-amd64-v1.15.2.tar.gz -C /opt/
[root@szdc0-22 src]# cd /opt/
[root@szdc0-22 opt]# mv kubernetes kubernetes-v1.15.2
[root@szdc0-22 opt]# ln -s /opt/kubernetes-v1.15.2/ /opt/kubernetes
[root@szdc0-22 opt]# cd kubernetes
[root@szdc0-22 kubernetes]# rm -f kubernetes-src.tar.gz 
[root@szdc0-22 kubernetes]# cd server/bin
[root@szdc0-22 bin]# rm -f *.tar
[root@szdc0-22 bin]# rm -f *_tag
[root@szdc0-22 bin]# mkdir cert
[root@szdc0-22 bin]# ll
total 884636
-rwxr-xr-x 1 root root  43534816 Aug  5  2019 apiextensions-apiserver
drwxr-xr-x 2 root root         6 Apr 24 14:48 cert
-rwxr-xr-x 1 root root 100548640 Aug  5  2019 cloud-controller-manager
-rwxr-xr-x 1 root root 200648416 Aug  5  2019 hyperkube
-rwxr-xr-x 1 root root  40182208 Aug  5  2019 kubeadm
-rwxr-xr-x 1 root root 164501920 Aug  5  2019 kube-apiserver
-rwxr-xr-x 1 root root 116397088 Aug  5  2019 kube-controller-manager
-rwxr-xr-x 1 root root  42985504 Aug  5  2019 kubectl
-rwxr-xr-x 1 root root 119616640 Aug  5  2019 kubelet
-rwxr-xr-x 1 root root  36987488 Aug  5  2019 kube-proxy
-rwxr-xr-x 1 root root  38786144 Aug  5  2019 kube-scheduler
-rwxr-xr-x 1 root root   1648224 Aug  5  2019 mounter
[root@szdc0-22 bin]# scp szdc0-200:/opt/certs/apiserver-key.pem /opt/kubernetes/server/bin/cert/
[root@szdc0-22 bin]# scp szdc0-200:/opt/certs/apiserver.pem /opt/kubernetes/server/bin/cert/
[root@szdc0-22 bin]# scp szdc0-200:/opt/certs/ca-key.pem /opt/kubernetes/server/bin/cert/
[root@szdc0-22 bin]# scp szdc0-200:/opt/certs/ca.pem /opt/kubernetes/server/bin/cert/
[root@szdc0-22 bin]# scp szdc0-200:/opt/certs/client-key.pem /opt/kubernetes/server/bin/cert/
[root@szdc0-22 bin]# scp szdc0-200:/opt/certs/client.pem /opt/kubernetes/server/bin/cert/
[root@szdc0-22 bin]# mkdir conf
[root@szdc0-22 bin]# vi conf/audit.yaml
apiVersion: audit.k8s.io/v1beta1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level
  - level: RequestResponse
    resources:
    - group: ""
      # Resource "pods" doesn't match requests to any subresource of pods,
      # which is consistent with the RBAC policy.
      resources: ["pods"]
  # Log "pods/log", "pods/status" at Metadata level
  - level: Metadata
    resources:
    - group: ""
      resources: ["pods/log", "pods/status"]

  # Don't log requests to a configmap called "controller-leader"
  - level: None
    resources:
    - group: ""
      resources: ["configmaps"]
      resourceNames: ["controller-leader"]

  # Don't log watch requests by the "system:kube-proxy" on endpoints or services
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
    - group: "" # core API group
      resources: ["endpoints", "services"]

  # Don't log authenticated requests to certain non-resource URL paths.
  - level: None
    userGroups: ["system:authenticated"]
    nonResourceURLs:
    - "/api*" # Wildcard matching.
    - "/version"

  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
    - group: "" # core API group
      resources: ["configmaps"]
    # This rule only applies to resources in the "kube-system" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["kube-system"]

  # Log configmap and secret changes in all other namespaces at the Metadata level.
  - level: Metadata
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]

  # Log all other resources in core and extensions at the Request level.
  - level: Request
    resources:
    - group: "" # core API group
    - group: "extensions" # Version of group should NOT be included.

  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
    # Long-running requests like watches that fall under this rule will not
    # generate an audit event in RequestReceived.
    omitStages:
      - "RequestReceived"
      
[root@szdc0-22 bin]# vi /opt/kubernetes/server/bin/kube-apiserver.sh
#!/bin/bash
./kube-apiserver \
  --apiserver-count 2 \
  --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
  --audit-policy-file ./conf/audit.yaml \
  --authorization-mode RBAC \
  --client-ca-file ./cert/ca.pem \
  --requestheader-client-ca-file ./cert/ca.pem \
  --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
  --etcd-cafile ./cert/ca.pem \
  --etcd-certfile ./cert/client.pem \
  --etcd-keyfile ./cert/client-key.pem \
  --etcd-servers https://10.0.0.12:2379,https://10.0.0.21:2379,https://10.0.0.22:2379 \
  --service-account-key-file ./cert/ca-key.pem \
  --service-cluster-ip-range 10.96.0.0/22 \
  --service-node-port-range 3000-29999 \
  --target-ram-mb=1024 \
  --kubelet-client-certificate ./cert/client.pem \
  --kubelet-client-key ./cert/client-key.pem \
  --log-dir  /data/logs/kubernetes/kube-apiserver \
  --tls-cert-file ./cert/apiserver.pem \
  --tls-private-key-file ./cert/apiserver-key.pem \
  --v 2

[root@szdc0-22 bin]# chmod +x /opt/kubernetes/server/bin/kube-apiserver.sh
[root@szdc0-22 bin]# mkdir -p /data/logs/kubernetes/kube-apiserver
[root@szdc0-22 bin]# vi /etc/supervisord.d/kube-apiserver.ini
[program:kube-apiserver-0-22]
command=/opt/kubernetes/server/bin/kube-apiserver.sh            ; the program (relative uses PATH, can take args)
numprocs=1                                                      ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                            ; directory to cwd to before exec (def no cwd)
autostart=true                                                  ; start at supervisord start (default: true)
autorestart=true                                                ; restart at unexpected quit (default: true)
startsecs=30                                                    ; number of secs prog must stay running (def. 1)
startretries=3                                                  ; max # of serial start failures (default 3)
exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                       ; setuid to this UNIX account to run the program
redirect_stderr=true                                            ; redirect proc stderr to stdout (default false)
killasgroup=true                                                ; kill all process in a group
stopasgroup=true                                                ; stop all process in a group
stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log        ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                     ; emit events on stdout writes (default false)
[root@szdc0-22 bin]# supervisorctl update
kube-apiserver-0-22: added process group
[root@szdc0-22 bin]# supervisorctl status
etcd-server-0-22                 RUNNING   pid 3245, uptime 1:13:12
kube-apiserver-0-22              RUNNING   pid 3361, uptime 0:00:44
[root@szdc0-22 bin]# netstat -lntup|grep kube-apiser
tcp        0      0 127.0.0.1:8080          0.0.0.0:*               LISTEN      3362/./kube-apiserv 
tcp6       0      0 :::6443                 :::*                    LISTEN      3362/./kube-apiserv

4.6 Configure the Layer-4 Reverse Proxy

1) Deploy nginx

# On szdc0-11 and szdc0-12
~]# yum install nginx -y

2) Configure the L4 proxy

# On szdc0-11 and szdc0-12, outside the http block
~]# vim /etc/nginx/nginx.conf
stream {
    upstream kube-apiserver {
        server 10.0.0.21:6443     max_fails=3 fail_timeout=30s;
        server 10.0.0.22:6443     max_fails=3 fail_timeout=30s;
    }
    server {
        listen 7443;
        proxy_connect_timeout 2s;
        proxy_timeout 900s;
        proxy_pass kube-apiserver;
    }
}

~]# nginx -t
~]# systemctl start nginx && systemctl enable nginx

3) Deploy keepalived

# On szdc0-11 and szdc0-12
~]# yum install keepalived -y

4) Configure keepalived

Create the port-check script:

# On szdc0-11 and szdc0-12
~]# vi /etc/keepalived/check_port.sh
#!/bin/bash
CHK_PORT=$1
if [ -n "$CHK_PORT" ];then
        PORT_PROCESS=`ss -lnt|grep $CHK_PORT|wc -l`
        if [ $PORT_PROCESS -eq 0 ];then
                echo "Port $CHK_PORT Is Not Used,End."
                exit 1
        fi
else
        echo "Check Port Cant Be Empty!"
fi
~]# chmod +x /etc/keepalived/check_port.sh
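keepalived only consumes the script's exit code (0 = port alive), so it can be dry-run by hand before wiring it in:

~]# /etc/keepalived/check_port.sh 7443; echo "exit code: $?"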

Configure the keepalived master on szdc0-11:

[root@szdc0-11 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   router_id 10.0.0.11

}

vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 251
    priority 100
    advert_int 1
    mcast_src_ip 10.0.0.11
    nopreempt

    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
         chk_nginx
    }
    virtual_ipaddress {
        10.0.0.10
    }
}

Configure the keepalived backup on szdc0-12:

[root@szdc0-12 etcd]# vim /etc/keepalived/keepalived.conf 
! Configuration File for keepalived
global_defs {
    router_id 10.0.0.12
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 251
    mcast_src_ip 10.0.0.12
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        10.0.0.10
    }
}

5) Start keepalived

# On szdc0-11 and szdc0-12
~]# systemctl start keepalived && systemctl enable keepalived

6) Check the VIP

[root@szdc0-11 ~]# ip a |grep eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    inet 10.0.0.11/24 brd 10.0.0.255 scope global eth0
    inet 10.0.0.10/32 scope global eth0
    
[root@szdc0-12 etcd]# ip a |grep eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    inet 10.0.0.12/24 brd 10.0.0.255 scope global eth0

7) Verify the proxy and the apiservers

[root@szdc0-21 bin]# netstat -lntup|grep kube-apiser
tcp        0      0 127.0.0.1:8080          0.0.0.0:*               LISTEN      1463/./kube-apiserv 
tcp6       0      0 :::6443                 :::*                    LISTEN      1463/./kube-apiserv
[root@szdc0-22 bin]# netstat -lntup|grep kube-apiser
tcp        0      0 127.0.0.1:8080          0.0.0.0:*               LISTEN      3362/./kube-apiserv 
tcp6       0      0 :::6443                 :::*                    LISTEN      3362/./kube-apiserv
[root@szdc0-11 ~]# netstat -lntup|grep 7443
tcp        0      0 0.0.0.0:7443            0.0.0.0:*               LISTEN      3231/nginx: master
[root@szdc0-12 etcd]# netstat -lntup|grep 7443
tcp        0      0 0.0.0.0:7443            0.0.0.0:*               LISTEN      2024/nginx: master
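End to end, the VIP should now front a live apiserver. Since /healthz on the secure port is readable, a quick probe from either proxy node (expected body: ok; -k skips CA verification for brevity, although 10.0.0.10 is listed in the apiserver certificate SANs):

[root@szdc0-11 ~]# curl -sk https://10.0.0.10:7443/healthz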

5. Deploy kube-controller-manager

5.1 Cluster Plan

Hostname           Role                     IP
szdc0-21.host.com  kube-controller-manager  10.0.0.21
szdc0-22.host.com  kube-controller-manager  10.0.0.22

Note: this section uses szdc0-21.host.com as the example; the other worker node is deployed the same way.

5.2 Deployment Steps

1) Create the startup script

# On szdc0-21 and szdc0-22
~]# vim /opt/kubernetes/server/bin/kube-controller-manager.sh
#!/bin/sh
./kube-controller-manager \
  --cluster-cidr 172.6.0.0/16 \
  --leader-elect true \
  --log-dir /data/logs/kubernetes/kube-controller-manager \
  --master http://127.0.0.1:8080 \
  --service-account-private-key-file ./cert/ca-key.pem \
  --service-cluster-ip-range 10.96.0.0/22 \
  --root-ca-file ./cert/ca.pem \
  --v 2
  
~]# chmod +x /opt/kubernetes/server/bin/kube-controller-manager.sh
~]# mkdir -p /data/logs/kubernetes/kube-controller-manager

2) Create the supervisor config; remember to change the program name per node

# On szdc0-21 and szdc0-22
~]# vi /etc/supervisord.d/kube-controller-manager.ini
[program:kube-controller-manager-0-21]
command=/opt/kubernetes/server/bin/kube-controller-manager.sh                     ; the program (relative uses PATH, can take args)
numprocs=1                                                                        ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                                              ; directory to cwd to before exec (def no cwd)
autostart=true                                                                    ; start at supervisord start (default: true)
autorestart=true                                                                  ; restart at unexpected quit (default: true)
startsecs=30                                                                      ; number of secs prog must stay running (def. 1)
startretries=3                                                                    ; max # of serial start failures (default 3)
exitcodes=0,2                                                                     ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                                   ; signal used to kill process (default TERM)
stopwaitsecs=10                                                                   ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                                         ; setuid to this UNIX account to run the program
redirect_stderr=true                                                              ; redirect proc stderr to stdout (default false)
killasgroup=true                                                                  ; kill all process in a group
stopasgroup=true                                                                  ; stop all process in a group
stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controller.stdout.log  ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                                      ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                                          ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                                       ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false

3) Start the service and verify

[root@szdc0-21 bin]# supervisorctl update
kube-controller-manager-0-21: added process group
[root@szdc0-21 bin]# supervisorctl status
etcd-server-0-21                 RUNNING   pid 1305, uptime 1:45:22
kube-apiserver-0-21              RUNNING   pid 1462, uptime 0:56:28
kube-controller-manager-0-21     RUNNING   pid 1552, uptime 0:00:34

[root@szdc0-22 bin]# supervisorctl update
kube-controller-manager-0-22: added process group
[root@szdc0-22 bin]# supervisorctl status
etcd-server-0-22                 RUNNING   pid 3245, uptime 2:00:15
kube-apiserver-0-22              RUNNING   pid 3361, uptime 0:47:47
kube-controller-manager-0-22     RUNNING   pid 3435, uptime 0:01:22

六、Deploying kube-scheduler

6.1、Cluster plan

Hostname Role IP
szdc0-21.host.com kube-scheduler 10.0.0.21
szdc0-22.host.com kube-scheduler 10.0.0.22

Note: this guide uses szdc0-21.host.com as the example; the other compute node is deployed in the same way.

6.2、Deployment steps

1) Create the startup script

# On szdc0-21 and szdc0-22
~]# vi /opt/kubernetes/server/bin/kube-scheduler.sh
#!/bin/sh
./kube-scheduler \
  --leader-elect \
  --log-dir /data/logs/kubernetes/kube-scheduler \
  --master http://127.0.0.1:8080 \
  --v 2

~]# chmod +x /opt/kubernetes/server/bin/kube-scheduler.sh
~]# mkdir -p /data/logs/kubernetes/kube-scheduler

2) Create the supervisor config

# On szdc0-21 and szdc0-22; note: adjust the config per host
~]# vi /etc/supervisord.d/kube-scheduler.ini
[program:kube-scheduler-0-21]
command=/opt/kubernetes/server/bin/kube-scheduler.sh                     ; the program (relative uses PATH, can take args)
numprocs=1                                                               ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                                     ; directory to cwd to before exec (def no cwd)
autostart=true                                                           ; start at supervisord start (default: true)
autorestart=true                                                         ; restart at unexpected quit (default: true)
startsecs=30                                                             ; number of secs prog must stay running (def. 1)
startretries=3                                                           ; max # of serial start failures (default 3)
exitcodes=0,2                                                            ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                          ; signal used to kill process (default TERM)
stopwaitsecs=10                                                          ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                                ; setuid to this UNIX account to run the program
redirect_stderr=true                                                     ; redirect proc stderr to stdout (default false)
killasgroup=true                                                         ; kill all process in a group
stopasgroup=true                                                         ; stop all process in a group
stdout_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                             ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                                 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                              ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                              ; emit events on stdout writes (default false)

3) Start the service and verify

[root@szdc0-21 bin]# supervisorctl update
kube-scheduler-0-21: added process group
[root@szdc0-21 bin]# supervisorctl status
etcd-server-0-21                 RUNNING   pid 1305, uptime 2:03:34
kube-apiserver-0-21              RUNNING   pid 1462, uptime 1:14:40
kube-controller-manager-0-21     RUNNING   pid 1552, uptime 0:18:46
kube-scheduler-0-21              RUNNING   pid 1581, uptime 0:01:19

[root@szdc0-22 bin]# supervisorctl update
kube-scheduler-0-22: added process group
[root@szdc0-22 bin]# supervisorctl status
etcd-server-0-22                 RUNNING   pid 3245, uptime 2:17:42
kube-apiserver-0-22              RUNNING   pid 3361, uptime 1:05:14
kube-controller-manager-0-22     RUNNING   pid 3435, uptime 0:18:49
kube-scheduler-0-22              RUNNING   pid 3459, uptime 0:01:22

七、Configuring kubectl

On each compute node:

~]# ln -s /opt/kubernetes/server/bin/kubectl /usr/bin/kubectl

# Command auto-completion
~]# yum install bash-completion -y
~]# kubectl completion bash > /etc/bash_completion.d/kubectl
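
The completion file is only read by new login shells; to enable it in the current shell without logging out, source it directly (a minimal sketch):

~]# source /usr/share/bash-completion/bash_completion
~]# source /etc/bash_completion.d/kubectl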

Check the status of the control-plane components:

~]# which kubectl 
/usr/bin/kubectl
~]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok                   
controller-manager   Healthy   ok                   
etcd-0               Healthy   {"health": "true"}   
etcd-2               Healthy   {"health": "true"}   
etcd-1               Healthy   {"health": "true"}

八、Deploying kubelet

8.1、Cluster plan

Hostname Role IP
szdc0-21.host.com kubelet 10.0.0.21
szdc0-22.host.com kubelet 10.0.0.22

Note: this guide uses szdc0-21.host.com as the example; the other compute node is deployed in the same way.

8.2、Deployment steps

1) Create the JSON config for the certificate signing request (CSR)

# Note: include every host that might ever run kubelet
[root@szdc0-200 certs]# vi /opt/certs/kubelet-csr.json
{
    "CN": "k8s-kubelet",
    "hosts": [
    "127.0.0.1",
    "10.0.0.10",
    "10.0.0.21",
    "10.0.0.22",
    "10.0.0.23",
    "10.0.0.24",
    "10.0.0.25",
    "10.0.0.26"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}

2) Generate the kubelet certificate and private key

[root@szdc0-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json | cfssl-json -bare kubelet
2021/04/24 16:19:40 [INFO] generate received request
2021/04/24 16:19:40 [INFO] received CSR
2021/04/24 16:19:40 [INFO] generating key: rsa-2048
2021/04/24 16:19:40 [INFO] encoded CSR
2021/04/24 16:19:40 [INFO] signed certificate with serial number 301819791217431241534592522212960523884453506155
2021/04/24 16:19:40 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@szdc0-200 certs]# ll kubelet*
-rw-r--r-- 1 root root 1098 Apr 24 16:19 kubelet.csr
-rw-r--r-- 1 root root  418 Apr 24 16:18 kubelet-csr.json
-rw------- 1 root root 1675 Apr 24 16:19 kubelet-key.pem
-rw-r--r-- 1 root root 1452 Apr 24 16:19 kubelet.pem

3) Copy the certificate and private key (note: the key file must be mode 600)

[root@szdc0-21 ~]# scp szdc0-200:/opt/certs/kubelet-key.pem /opt/kubernetes/server/bin/cert/
[root@szdc0-21 ~]# scp szdc0-200:/opt/certs/kubelet.pem /opt/kubernetes/server/bin/cert/
[root@szdc0-22 ~]# scp szdc0-200:/opt/certs/kubelet-key.pem /opt/kubernetes/server/bin/cert/
[root@szdc0-22 ~]# scp szdc0-200:/opt/certs/kubelet.pem /opt/kubernetes/server/bin/cert/

4) Create the kubeconfig

# Note: run these in the conf directory
[root@szdc0-21 conf]# pwd
/opt/kubernetes/server/bin/conf

# set-cluster
kubectl config set-cluster myk8s \
  --certificate-authority=/opt/kubernetes/server/bin/cert/ca.pem \
  --embed-certs=true \
  --server=https://10.0.0.10:7443 \
  --kubeconfig=kubelet.kubeconfig

# set-credentials
kubectl config set-credentials k8s-node \
  --client-certificate=/opt/kubernetes/server/bin/cert/client.pem \
  --client-key=/opt/kubernetes/server/bin/cert/client-key.pem \
  --embed-certs=true \
  --kubeconfig=kubelet.kubeconfig

# set-context
kubectl config set-context myk8s-context \
  --cluster=myk8s \
  --user=k8s-node \
  --kubeconfig=kubelet.kubeconfig

# use-context
kubectl config use-context myk8s-context --kubeconfig=kubelet.kubeconfig

[root@szdc0-21 conf]# ll
total 12
-rw-r--r-- 1 root root 2223 Apr 24 14:40 audit.yaml
-rw------- 1 root root 6199 Apr 24 16:26 kubelet.kubeconfig
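
Optionally sanity-check the generated kubeconfig (a sketch; kubectl redacts the embedded certificates):

kubectl config view --kubeconfig=kubelet.kubeconfig              # embedded certs appear as DATA+OMITTED
kubectl config current-context --kubeconfig=kubelet.kubeconfig   # should print: myk8s-context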

5) Create the resource manifest

[root@szdc0-21 conf]# vim /opt/kubernetes/server/bin/conf/k8s-node.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: k8s-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: k8s-node
  
# Apply it so the cluster role binding for user k8s-node takes effect
[root@szdc0-21 conf]# kubectl create -f k8s-node.yaml
clusterrolebinding.rbac.authorization.k8s.io/k8s-node created

# View the cluster role binding
[root@szdc0-21 conf]# kubectl get clusterrolebinding k8s-node
NAME       AGE
k8s-node   44s
[root@szdc0-21 conf]# kubectl get clusterrolebinding k8s-node -o yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  creationTimestamp: "2021-04-24T08:29:07Z"
  name: k8s-node
  resourceVersion: "3739"
  selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/k8s-node
  uid: dc2370cc-9bfe-405c-8f0d-961533e4c9b2
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: k8s-node

6) Copy the generated config to the other node

[root@szdc0-22 ~]# scp szdc0-21:/opt/kubernetes/server/bin/conf/kubelet.kubeconfig /opt/kubernetes/server/bin/conf/
[root@szdc0-22 ~]# scp szdc0-21:/opt/kubernetes/server/bin/conf/k8s-node.yaml /opt/kubernetes/server/bin/conf/

7) Prepare the pause base image

# On the ops host
[root@szdc0-200 certs]# docker pull kubernetes/pause
[root@szdc0-200 certs]# docker images|grep pause
kubernetes/pause                latest                     f9d5de079539   6 years ago     240kB
[root@szdc0-200 certs]# docker tag f9d5de079539 harbor.od.com/public/pause:latest
[root@szdc0-200 certs]# docker push harbor.od.com/public/pause:latest

8) Create the kubelet startup script

# Note: the kubelet startup script differs slightly per host; adjust hostname-override when deploying other nodes
[root@szdc0-21 conf]# vi /opt/kubernetes/server/bin/kubelet.sh
#!/bin/sh
./kubelet \
  --anonymous-auth=false \
  --cgroup-driver systemd \
  --cluster-dns 10.96.0.2 \
  --cluster-domain cluster.local \
  --runtime-cgroups=/systemd/system.slice \
  --kubelet-cgroups=/systemd/system.slice \
  --fail-swap-on="false" \
  --client-ca-file ./cert/ca.pem \
  --tls-cert-file ./cert/kubelet.pem \
  --tls-private-key-file ./cert/kubelet-key.pem \
  --hostname-override szdc0-21.host.com \
  --image-gc-high-threshold 20 \
  --image-gc-low-threshold 10 \
  --kubeconfig ./conf/kubelet.kubeconfig \
  --log-dir /data/logs/kubernetes/kube-kubelet \
  --pod-infra-container-image harbor.od.com/public/pause:latest \
  --root-dir /data/kubelet
  
[root@szdc0-21 conf]# ls -l|grep kubelet.kubeconfig
-rw------- 1 root root 6199 Apr 24 16:26 kubelet.kubeconfig
[root@szdc0-21 conf]# chmod +x /opt/kubernetes/server/bin/kubelet.sh
[root@szdc0-21 conf]# mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet

9) Create the supervisor config

[root@szdc0-21 conf]# vi /etc/supervisord.d/kube-kubelet.ini
[program:kube-kubelet-0-21]
command=/opt/kubernetes/server/bin/kubelet.sh     ; the program (relative uses PATH, can take args)
numprocs=1                                        ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin              ; directory to cwd to before exec (def no cwd)
autostart=true                                    ; start at supervisord start (default: true)
autorestart=true                                  ; restart at unexpected quit (default: true)
startsecs=30                                      ; number of secs prog must stay running (def. 1)
startretries=3                                    ; max # of serial start failures (default 3)
exitcodes=0,2                                     ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                   ; signal used to kill process (default TERM)
stopwaitsecs=10                                   ; max num secs to wait b4 SIGKILL (default 10)
user=root                                         ; setuid to this UNIX account to run the program
redirect_stderr=true                              ; redirect proc stderr to stdout (default false)
killasgroup=true                                  ; kill all process in a group
stopasgroup=true                                  ; stop all process in a group
stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log   ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                      ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                          ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                       ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                       ; emit events on stdout writes (default false)

10) Start the service and verify

[root@szdc0-21 conf]# supervisorctl update
kube-kubelet-0-21: added process group
[root@szdc0-21 conf]# supervisorctl status
etcd-server-0-21                 RUNNING   pid 1305, uptime 2:48:29
kube-apiserver-0-21              RUNNING   pid 1462, uptime 1:59:35
kube-controller-manager-0-21     RUNNING   pid 1552, uptime 1:03:41
kube-kubelet-0-21                RUNNING   pid 1801, uptime 0:03:15
kube-scheduler-0-21              RUNNING   pid 1581, uptime 0:46:14

[root@szdc0-22 ~]# supervisorctl update
kube-kubelet-0-22: added process group
[root@szdc0-22 ~]# supervisorctl status
etcd-server-0-22                 RUNNING   pid 3245, uptime 3:03:02
kube-apiserver-0-22              RUNNING   pid 3361, uptime 1:50:34
kube-controller-manager-0-22     RUNNING   pid 3435, uptime 1:04:09
kube-kubelet-0-22                RUNNING   pid 4021, uptime 0:00:38
kube-scheduler-0-22              RUNNING   pid 3459, uptime 0:46:42

# If anything errors out, check the log
[root@szdc0-22 ~]# tail -fn 200 /data/logs/kubernetes/kube-kubelet/kubelet.stdout.log

11) Check the compute nodes

[root@szdc0-21 conf]# kubectl get nodes
NAME                STATUS   ROLES    AGE     VERSION
szdc0-21.host.com   Ready    <none>   5m34s   v1.15.2
szdc0-22.host.com   Ready    <none>   2m27s   v1.15.2
[root@szdc0-21 conf]# kubectl get nodes -o wide
NAME                STATUS   ROLES    AGE     VERSION   INTERNAL-IP   EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION          CONTAINER-RUNTIME
szdc0-21.host.com   Ready    <none>   5m44s   v1.15.2   10.0.0.21     <none>        CentOS Linux 7 (Core)   3.10.0-693.el7.x86_64   docker://20.10.6
szdc0-22.host.com   Ready    <none>   2m37s   v1.15.2   10.0.0.22     <none>        CentOS Linux 7 (Core)   3.10.0-693.el7.x86_64   docker://20.10.6

# Label the nodes
[root@szdc0-21 conf]# kubectl label node szdc0-21.host.com node-role.kubernetes.io/node=
node/szdc0-21.host.com labeled
[root@szdc0-21 conf]# kubectl label node szdc0-21.host.com node-role.kubernetes.io/master=
node/szdc0-21.host.com labeled
[root@szdc0-21 conf]# kubectl label node szdc0-22.host.com node-role.kubernetes.io/node=
node/szdc0-22.host.com labeled
[root@szdc0-21 conf]# kubectl label node szdc0-22.host.com node-role.kubernetes.io/master=
node/szdc0-22.host.com labeled

[root@szdc0-21 conf]# kubectl get nodes
NAME                STATUS   ROLES         AGE     VERSION
szdc0-21.host.com   Ready    master,node   7m4s    v1.15.2
szdc0-22.host.com   Ready    master,node   3m57s   v1.15.2
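
These role labels are purely cosmetic (they only fill in the ROLES column). Should one ever need to be removed, a trailing minus deletes it (a hypothetical example, not needed here):

kubectl label node szdc0-21.host.com node-role.kubernetes.io/master-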

九、Deploying kube-proxy

9.1、Cluster plan

Hostname Role IP
szdc0-21.host.com kube-proxy 10.0.0.21
szdc0-22.host.com kube-proxy 10.0.0.22

Note: this guide uses szdc0-21.host.com as the example; the other compute node is deployed in the same way.

9.2、Deployment steps

1) Create the JSON file for the certificate signing request (CSR)

[root@szdc0-200 certs]# vi /opt/certs/kube-proxy-csr.json
{
    "CN": "system:kube-proxy",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}

2) Generate the kube-proxy client certificate and private key

[root@szdc0-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json |cfssl-json -bare kube-proxy-client
2021/04/24 17:20:29 [INFO] generate received request
2021/04/24 17:20:29 [INFO] received CSR
2021/04/24 17:20:29 [INFO] generating key: rsa-2048
2021/04/24 17:20:29 [INFO] encoded CSR
2021/04/24 17:20:29 [INFO] signed certificate with serial number 185252091870579448935356842900642893459983568399
2021/04/24 17:20:29 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@szdc0-200 certs]# ll kube-proxy*
-rw-r--r-- 1 root root 1005 Apr 24 17:20 kube-proxy-client.csr
-rw------- 1 root root 1675 Apr 24 17:20 kube-proxy-client-key.pem
-rw-r--r-- 1 root root 1375 Apr 24 17:20 kube-proxy-client.pem
-rw-r--r-- 1 root root  267 Apr 24 17:19 kube-proxy-csr.json

Note: this client certificate cannot be shared with the generic client cert used elsewhere, because the CN above changed to "CN": "system:kube-proxy"

3) Copy the certificates

[root@szdc0-21 conf]# scp szdc0-200:/opt/certs/kube-proxy-client-key.pem /opt/kubernetes/server/bin/cert/
[root@szdc0-21 conf]# scp szdc0-200:/opt/certs/kube-proxy-client.pem /opt/kubernetes/server/bin/cert/
[root@szdc0-22 conf]# scp szdc0-200:/opt/certs/kube-proxy-client-key.pem /opt/kubernetes/server/bin/cert/
[root@szdc0-22 conf]# scp szdc0-200:/opt/certs/kube-proxy-client.pem /opt/kubernetes/server/bin/cert/

4) Create the kubeconfig

# Note: run these in the conf directory
[root@szdc0-21 conf]# pwd
/opt/kubernetes/server/bin/conf

# set-cluster
kubectl config set-cluster myk8s \
  --certificate-authority=/opt/kubernetes/server/bin/cert/ca.pem \
  --embed-certs=true \
  --server=https://10.0.0.10:7443 \
  --kubeconfig=kube-proxy.kubeconfig

# set-credentials
kubectl config set-credentials kube-proxy \
  --client-certificate=/opt/kubernetes/server/bin/cert/kube-proxy-client.pem \
  --client-key=/opt/kubernetes/server/bin/cert/kube-proxy-client-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

# set-context
kubectl config set-context myk8s-context \
  --cluster=myk8s \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

# use-context
kubectl config use-context myk8s-context --kubeconfig=kube-proxy.kubeconfig

5) Load the ipvs kernel modules

# Required on both nodes
[root@szdc0-21 conf]# vi /root/ipvs.sh
#!/bin/bash
# Load every ipvs-related module shipped with the running kernel
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for i in $(ls $ipvs_mods_dir|grep -o "^[^.]*")
do
  # modprobe only the modules that modinfo can resolve
  /sbin/modinfo -F filename $i &>/dev/null
  if [ $? -eq 0 ];then
    /sbin/modprobe $i
  fi
done

[root@szdc0-21 conf]# sh /root/ipvs.sh 
[root@szdc0-21 conf]# lsmod |grep ip_vs
ip_vs_wrr              12697  0 
ip_vs_wlc              12519  0 
ip_vs_sh               12688  0 
ip_vs_sed              12519  0 
ip_vs_rr               12600  0 
ip_vs_pe_sip           12697  0 
nf_conntrack_sip       33860  1 ip_vs_pe_sip
ip_vs_nq               12516  0 
ip_vs_lc               12516  0 
ip_vs_lblcr            12922  0 
ip_vs_lblc             12819  0 
ip_vs_ftp              13079  0 
ip_vs_dh               12688  0 
ip_vs                 141092  24 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_pe_sip,ip_vs_lblcr,ip_vs_lblc
nf_nat                 26787  3 ip_vs_ftp,nf_nat_ipv4,nf_nat_masquerade_ipv4
nf_conntrack          133387  8 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_sip,nf_conntrack_ipv4
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack

6) Create the kube-proxy startup script

# Note: adjust hostname-override when deploying other nodes
[root@szdc0-21 conf]# vi /opt/kubernetes/server/bin/kube-proxy.sh
#!/bin/sh
./kube-proxy \
  --cluster-cidr 172.6.0.0/16 \
  --hostname-override szdc0-21.host.com \
  --proxy-mode=ipvs \
  --ipvs-scheduler=nq \
  --kubeconfig ./conf/kube-proxy.kubeconfig
  
[root@szdc0-21 conf]# ls -l|grep kube-proxy.kubeconfig 
-rw------- 1 root root 6215 Apr 24 17:25 kube-proxy.kubeconfig
[root@szdc0-21 conf]# chmod +x /opt/kubernetes/server/bin/kube-proxy.sh
[root@szdc0-21 conf]# mkdir -p /data/logs/kubernetes/kube-proxy

7) Create the supervisor config

[root@szdc0-21 conf]# vi /etc/supervisord.d/kube-proxy.ini
[program:kube-proxy-0-21]
command=/opt/kubernetes/server/bin/kube-proxy.sh                     ; the program (relative uses PATH, can take args)
numprocs=1                                                           ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                                 ; directory to cwd to before exec (def no cwd)
autostart=true                                                       ; start at supervisord start (default: true)
autorestart=true                                                     ; restart at unexpected quit (default: true)
startsecs=30                                                         ; number of secs prog must stay running (def. 1)
startretries=3                                                       ; max # of serial start failures (default 3)
exitcodes=0,2                                                        ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                      ; signal used to kill process (default TERM)
stopwaitsecs=10                                                      ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                            ; setuid to this UNIX account to run the program
redirect_stderr=true                                                 ; redirect proc stderr to stdout (default false)
killasgroup=true                                                     ; kill all process in a group
stopasgroup=true                                                     ; stop all process in a group
stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log     ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                         ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                             ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                          ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                          ; emit events on stdout writes (default false)

8) Start the service and verify

[root@szdc0-21 conf]# supervisorctl update
kube-proxy-0-21: added process group
[root@szdc0-21 conf]# supervisorctl status
etcd-server-0-21                 RUNNING   pid 1305, uptime 3:39:36
kube-apiserver-0-21              RUNNING   pid 1462, uptime 2:50:42
kube-controller-manager-0-21     RUNNING   pid 1552, uptime 1:54:48
kube-kubelet-0-21                RUNNING   pid 1801, uptime 0:54:22
kube-proxy-0-21                  RUNNING   pid 11937, uptime 0:04:07
kube-scheduler-0-21              RUNNING   pid 1581, uptime 1:37:21

[root@szdc0-22 conf]# supervisorctl update
kube-proxy-0-22: added process group
[root@szdc0-22 conf]# supervisorctl status
etcd-server-0-22                 RUNNING   pid 3245, uptime 3:54:32
kube-apiserver-0-22              RUNNING   pid 3361, uptime 2:42:04
kube-controller-manager-0-22     RUNNING   pid 3435, uptime 1:55:39
kube-kubelet-0-22                RUNNING   pid 4021, uptime 0:52:08
kube-proxy-0-22                  RUNNING   pid 14293, uptime 0:00:36
kube-scheduler-0-22              RUNNING   pid 3459, uptime 1:38:12

9) Install ipvsadm

# Install on both nodes
[root@szdc0-21 conf]# yum install ipvsadm -y
[root@szdc0-21 conf]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.96.0.1:443 nq
  -> 10.0.0.21:6443               Masq    1      0          0         
  -> 10.0.0.22:6443               Masq    1      0          0
[root@szdc0-21 conf]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   173m
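
The 10.96.0.1:443 entry is the default kubernetes Service, scheduled with nq ("never queue") as set in kube-proxy.sh. A single virtual server can be inspected on its own (a sketch):

ipvsadm -Ln -t 10.96.0.1:443    # lists just this virtual server and its real servers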

十、Finishing the deployment and verifying the cluster

1) Create the resource manifest

[root@szdc0-21 ~]# vim /root/nginx-ds.yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: harbor.od.com/public/nginx:v1.7.9
        ports:
        - containerPort: 80

2) Log in to harbor from the cluster's compute nodes

[root@szdc0-21 ~]# docker login harbor.od.com
[root@szdc0-22 ~]# docker login harbor.od.com

3) Create the pods

[root@szdc0-21 ~]# kubectl create -f nginx-ds.yaml
daemonset.extensions/nginx-ds created
[root@szdc0-21 ~]# kubectl get pods
NAME             READY   STATUS    RESTARTS   AGE
nginx-ds-9q9jm   1/1     Running   0          10s
nginx-ds-vbwbk   1/1     Running   0          10s
[root@szdc0-21 ~]# kubectl get pods -o wide
NAME             READY   STATUS    RESTARTS   AGE    IP           NODE                NOMINATED NODE   READINESS GATES
nginx-ds-9q9jm   1/1     Running   0          108s   172.6.21.2   szdc0-21.host.com   <none>           <none>
nginx-ds-vbwbk   1/1     Running   0          108s   172.6.22.2   szdc0-22.host.com   <none>           <none>
[root@szdc0-21 ~]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok                   
controller-manager   Healthy   ok                   
etcd-0               Healthy   {"health": "true"}   
etcd-1               Healthy   {"health": "true"}   
etcd-2               Healthy   {"health": "true"}

4) Note

Pods cannot yet communicate across nodes: no CNI network plugin has been deployed (flannel is installed in the next section), and the CoreDNS add-on is also still missing.

[root@szdc0-21 ~]# kubectl get pods -o wide
NAME             READY   STATUS    RESTARTS   AGE    IP           NODE                NOMINATED NODE   READINESS GATES
nginx-ds-9q9jm   1/1     Running   0          108s   172.6.21.2   szdc0-21.host.com   <none>           <none>
nginx-ds-vbwbk   1/1     Running   0          108s   172.6.22.2   szdc0-22.host.com   <none>           <none>
[root@szdc0-21 ~]# kubectl exec -it nginx-ds-9q9jm /bin/bash
root@nginx-ds-9q9jm:/# ping 172.6.22.2 # unreachable
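
The cause is visible in the host routing table: until a CNI plugin is deployed there is no route to the other node's pod subnet (a quick check on szdc0-21; expect no output):

route -n | grep 172.6.22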

十一、Deploying the CNI network plugin: flannel

11.1、Cluster plan

Hostname Role IP
szdc0-21.host.com flannel 10.0.0.21
szdc0-22.host.com flannel 10.0.0.22

Note: this guide uses szdc0-21.host.com as the example; the other compute node is deployed in the same way.

11.2、Deployment steps

1) Download the software

Download: https://github.com/flannel-io/flannel/releases

[root@szdc0-21 ~]# cd /opt/src/
[root@szdc0-21 src]# wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
[root@szdc0-21 src]# mkdir /opt/flannel-v0.11.0
[root@szdc0-21 src]# tar xf flannel-v0.11.0-linux-amd64.tar.gz -C /opt/flannel-v0.11.0/
[root@szdc0-21 src]# ln -s /opt/flannel-v0.11.0/ /opt/flannel
[root@szdc0-21 src]# ll /opt/flannel
lrwxrwxrwx 1 root root 21 Apr 24 18:33 /opt/flannel -> /opt/flannel-v0.11.0/
[root@szdc0-21 src]# ll /opt/flannel-v0.11.0/
total 34436
-rwxr-xr-x 1 root root 35249016 Jan 29  2019 flanneld
-rwxr-xr-x 1 root root     2139 Oct 23  2018 mk-docker-opts.sh
-rw-r--r-- 1 root root     4300 Oct 23  2018 README.md

2) Copy the certificates

[root@szdc0-21 src]# mkdir /opt/flannel/cert/
[root@szdc0-21 src]# cd /opt/flannel/cert/
[root@szdc0-21 cert]# scp szdc0-200:/opt/certs/ca.pem /opt/flannel/cert/
[root@szdc0-21 cert]# scp szdc0-200:/opt/certs/client.pem /opt/flannel/cert/
[root@szdc0-21 cert]# scp szdc0-200:/opt/certs/client-key.pem /opt/flannel/cert/

3) Create the flannel config

[root@szdc0-21 flannel]# pwd
/opt/flannel
[root@szdc0-21 flannel]# vi subnet.env
FLANNEL_NETWORK=172.6.0.0/16
FLANNEL_SUBNET=172.6.21.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false

Note: the flannel config differs slightly per host; adjust FLANNEL_SUBNET when deploying other nodes (see the sketch below).
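
For reference, subnet.env on szdc0-22 would look like this (a sketch; only FLANNEL_SUBNET changes):

FLANNEL_NETWORK=172.6.0.0/16
FLANNEL_SUBNET=172.6.22.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false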

4) Create the startup script

[root@szdc0-21 flannel]# vi flanneld.sh
#!/bin/sh
./flanneld \
  --public-ip=10.0.0.21 \
  --etcd-endpoints=https://10.0.0.12:2379,https://10.0.0.21:2379,https://10.0.0.22:2379 \
  --etcd-keyfile=./cert/client-key.pem \
  --etcd-certfile=./cert/client.pem \
  --etcd-cafile=./cert/ca.pem \
  --iface=eth0 \
  --subnet-file=./subnet.env \
  --healthz-port=2401
  
[root@szdc0-21 flannel]# chmod +x /opt/flannel/flanneld.sh 
[root@szdc0-21 flannel]# mkdir -p /data/logs/flanneld

Note: the flannel startup script differs slightly per host; adjust it when deploying other nodes (see the sketch below).
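
Concretely, flanneld.sh on szdc0-22 is identical except for the public IP (a sketch):

# on szdc0-22:
#   --public-ip=10.0.0.21  ->  --public-ip=10.0.0.22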

5) Create the supervisor config

[root@szdc0-21 flannel]# vim /etc/supervisord.d/flannel.ini
[program:flanneld-0-21]
command=/opt/flannel/flanneld.sh                             ; the program (relative uses PATH, can take args)
numprocs=1                                                   ; number of processes copies to start (def 1)
directory=/opt/flannel                                       ; directory to cwd to before exec (def no cwd)
autostart=true                                               ; start at supervisord start (default: true)
autorestart=true                                             ; restart at unexpected quit (default: true)
startsecs=30                                                 ; number of secs prog must stay running (def. 1)
startretries=3                                               ; max # of serial start failures (default 3)
exitcodes=0,2                                                ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                              ; signal used to kill process (default TERM)
stopwaitsecs=10                                              ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                    ; setuid to this UNIX account to run the program
redirect_stderr=true                                         ; redirect proc stderr to stdout (default false)
killasgroup=true                                             ; kill all process in a group
stopasgroup=true                                             ; stop all process in a group
stdout_logfile=/data/logs/flanneld/flanneld.stdout.log       ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                 ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                     ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                  ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                  ; emit events on stdout writes (default false)

Note: the flannel supervisor config differs slightly per host; adjust it when deploying other nodes.

By default supervisord does not kill child processes when stopping a program; the following two settings must be added to the corresponding service's .ini file (already included above):

killasgroup=true
stopasgroup=true

6) Write the host-gw backend into etcd

# Only needs to be done on one node; etcd replicates it automatically
[root@szdc0-21 flannel]# cd /opt/etcd
[root@szdc0-21 etcd]# ./etcdctl set /coreos.com/network/config '{"Network": "172.6.0.0/16", "Backend": {"Type": "host-gw"}}'
{"Network": "172.6.0.0/16", "Backend": {"Type": "host-gw"}}
[root@szdc0-21 etcd]# ./etcdctl get /coreos.com/network/config
{"Network": "172.6.0.0/16", "Backend": {"Type": "host-gw"}}
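
To confirm that etcd replicated the key, read it back from another member (a sketch):

# On szdc0-22, which also runs etcd:
cd /opt/etcd && ./etcdctl get /coreos.com/network/config    # expect the same host-gw JSON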

7) Start the service and verify

[root@szdc0-21 etcd]# supervisorctl update
flanneld-0-21: added process group
[root@szdc0-21 etcd]# supervisorctl status
etcd-server-0-21                 RUNNING   pid 1305, uptime 4:54:00
flanneld-0-21                    RUNNING   pid 33440, uptime 0:03:22
kube-apiserver-0-21              RUNNING   pid 1462, uptime 4:05:06
kube-controller-manager-0-21     RUNNING   pid 1552, uptime 3:09:12
kube-kubelet-0-21                RUNNING   pid 1801, uptime 2:08:46
kube-proxy-0-21                  RUNNING   pid 11937, uptime 1:18:31
kube-scheduler-0-21              RUNNING   pid 1581, uptime 2:51:45

[root@szdc0-22 flannel]# supervisorctl update
flanneld-0-22: added process group
[root@szdc0-22 flannel]# supervisorctl status
etcd-server-0-22                 RUNNING   pid 3245, uptime 5:10:18
flanneld-0-22                    RUNNING   pid 35469, uptime 0:01:50
kube-apiserver-0-22              RUNNING   pid 3361, uptime 3:57:50
kube-controller-manager-0-22     RUNNING   pid 3435, uptime 3:11:25
kube-kubelet-0-22                RUNNING   pid 4021, uptime 2:07:54
kube-proxy-0-22                  RUNNING   pid 14293, uptime 1:16:22
kube-scheduler-0-22              RUNNING   pid 3459, uptime 2:53:58

8) Verify the cluster: pods can now reach each other across nodes

[root@szdc0-21 ~]# kubectl apply -f /root/nginx-ds.yaml 
daemonset.extensions/nginx-ds created
[root@szdc0-21 ~]# kubectl get pods -o wide
NAME             READY   STATUS    RESTARTS   AGE   IP           NODE                NOMINATED NODE   READINESS GATES
nginx-ds-7h72v   1/1     Running   0          18s   172.6.21.2   szdc0-21.host.com   <none>           <none>
nginx-ds-d28bq   1/1     Running   0          18s   172.6.22.2   szdc0-22.host.com   <none>           <none>

[root@szdc0-21 ~]# kubectl exec -it nginx-ds-7h72v /bin/bash
root@nginx-ds-7h72v:/# ping 172.6.22.2
PING 172.6.22.2 (172.6.22.2): 48 data bytes
56 bytes from 172.6.22.2: icmp_seq=0 ttl=62 time=0.445 ms
56 bytes from 172.6.22.2: icmp_seq=1 ttl=62 time=1.298 ms

[root@szdc0-22 flannel]# kubectl exec -it nginx-ds-d28bq /bin/bash
root@nginx-ds-d28bq:/# ping 172.6.21.2
PING 172.6.21.2 (172.6.21.2): 48 data bytes
56 bytes from 172.6.21.2: icmp_seq=0 ttl=62 time=0.824 ms
56 bytes from 172.6.21.2: icmp_seq=1 ttl=62 time=0.845 ms

[root@szdc0-21 ~]# ping 172.6.21.2
PING 172.6.21.2 (172.6.21.2) 56(84) bytes of data.
64 bytes from 172.6.21.2: icmp_seq=1 ttl=64 time=0.049 ms
[root@szdc0-21 ~]# ping 172.6.22.2
PING 172.6.22.2 (172.6.22.2) 56(84) bytes of data.
64 bytes from 172.6.22.2: icmp_seq=1 ttl=63 time=0.346 ms

[root@szdc0-22 ~]# ping 172.6.21.2
PING 172.6.21.2 (172.6.21.2) 56(84) bytes of data.
64 bytes from 172.6.21.2: icmp_seq=1 ttl=63 time=0.291 ms
[root@szdc0-22 ~]# ping 172.6.22.2
PING 172.6.22.2 (172.6.22.2) 56(84) bytes of data.
64 bytes from 172.6.22.2: icmp_seq=1 ttl=64 time=0.043 ms

Why can pods on different hosts communicate with each other?

[root@szdc0-21 ~]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         10.0.0.254      0.0.0.0         UG    0      0        0 eth0
10.0.0.0        0.0.0.0         255.255.255.0   U     0      0        0 eth0
169.254.0.0     0.0.0.0         255.255.0.0     U     1002   0        0 eth0
172.6.21.0      0.0.0.0         255.255.255.0   U     0      0        0 docker0
172.6.22.0      10.0.0.22       255.255.255.0   UG    0      0        0 eth0

[root@szdc0-22 ~]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         10.0.0.254      0.0.0.0         UG    0      0        0 eth0
10.0.0.0        0.0.0.0         255.255.255.0   U     0      0        0 eth0
169.254.0.0     0.0.0.0         255.255.0.0     U     1002   0        0 eth0
172.6.21.0      10.0.0.21       255.255.255.0   UG    0      0        0 eth0
172.6.22.0      0.0.0.0         255.255.255.0   U     0      0        0 docker0

Essentially, the host-gw backend just added routing rules, equivalent to:

[root@szdc0-21 flannel]# route add -net 172.6.22.0/24 gw 10.0.0.22 dev eth0
[root@szdc0-22 flannel]# route add -net 172.6.21.0/24 gw 10.0.0.21 dev eth0

9) Optimize the iptables rules on each node

By default, traffic between pods on different nodes is source-NATed, so the peer sees the host's IP; optimize the rules so that the pod's own IP is shown instead.

# Install iptables
[root@szdc0-21 ~]# yum install iptables-services -y
[root@szdc0-21 ~]# systemctl start iptables.service && systemctl enable iptables.service

Optimize host szdc0-21:

[root@szdc0-21 ~]# iptables-save|grep -i postrouting
:POSTROUTING ACCEPT [5:303]
:KUBE-POSTROUTING - [0:0]
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A POSTROUTING -s 172.6.21.0/24 ! -o docker0 -j MASQUERADE
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -m mark --mark 0x4000/0x4000 -j MASQUERADE

# First delete the original rule
[root@szdc0-21 ~]# iptables -t nat -D POSTROUTING -s 172.6.21.0/24 ! -o docker0 -j MASQUERADE
# On 10.0.0.21: SNAT traffic sourced from the docker subnet 172.6.21.0/24 only when the destination is NOT in 172.6.0.0/16 and the packet does not leave via the docker0 bridge
[root@szdc0-21 ~]# iptables -t nat -I POSTROUTING -s 172.6.21.0/24 ! -d 172.6.0.0/16 ! -o docker0 -j MASQUERADE
[root@szdc0-21 ~]# iptables-save|grep -i postrouting
:POSTROUTING ACCEPT [0:0]
:KUBE-POSTROUTING - [0:0]
-A POSTROUTING -s 172.6.21.0/24 ! -d 172.6.0.0/16 ! -o docker0 -j MASQUERADE
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -m mark --mark 0x4000/0x4000 -j MASQUERADE

# Remove the default REJECT rules
[root@szdc0-21 ~]# iptables-save | grep -i reject
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A FORWARD -j REJECT --reject-with icmp-host-prohibited
[root@szdc0-21 ~]# iptables -t filter -D INPUT -j REJECT --reject-with icmp-host-prohibited
[root@szdc0-21 ~]# iptables -t filter -D FORWARD -j REJECT --reject-with icmp-host-prohibited
[root@szdc0-21 ~]# iptables-save > /etc/sysconfig/iptables
[root@szdc0-21 ~]# service iptables save
iptables: Saving firewall rules to /etc/sysconfig/iptables:[  OK  ]

Optimize host szdc0-22:

[root@szdc0-22 ~]# iptables-save|grep -i postrouting
:POSTROUTING ACCEPT [4:240]
:KUBE-POSTROUTING - [0:0]
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A POSTROUTING -s 172.6.22.0/24 ! -o docker0 -j MASQUERADE
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -m mark --mark 0x4000/0x4000 -j MASQUERADE

# First delete the original rule
[root@szdc0-22 ~]# iptables -t nat -D POSTROUTING -s 172.6.22.0/24 ! -o docker0 -j MASQUERADE
# On 10.0.0.22: SNAT traffic sourced from the docker subnet 172.6.22.0/24 only when the destination is NOT in 172.6.0.0/16 and the packet does not leave via the docker0 bridge
[root@szdc0-22 ~]# iptables -t nat -I POSTROUTING -s 172.6.22.0/24 ! -d 172.6.0.0/16 ! -o docker0 -j MASQUERADE
[root@szdc0-22 ~]# iptables-save|grep -i postrouting
:POSTROUTING ACCEPT [2:120]
:KUBE-POSTROUTING - [0:0]
-A POSTROUTING -s 172.6.22.0/24 ! -d 172.6.0.0/16 ! -o docker0 -j MASQUERADE
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -m mark --mark 0x4000/0x4000 -j MASQUERADE

# Remove the default REJECT rules
[root@szdc0-22 ~]# iptables-save | grep -i reject
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A FORWARD -j REJECT --reject-with icmp-host-prohibited
[root@szdc0-22 ~]# iptables -t filter -D INPUT -j REJECT --reject-with icmp-host-prohibited
[root@szdc0-22 ~]# iptables -t filter -D FORWARD -j REJECT --reject-with icmp-host-prohibited
[root@szdc0-22 ~]# iptables-save > /etc/sysconfig/iptables
[root@szdc0-22 ~]# service iptables save
iptables: Saving firewall rules to /etc/sysconfig/iptables:[  OK  ]

Test:

[root@szdc0-21 ~]# vim nginx-ds.yaml 
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: harbor.od.com/public/nginx:curl
        command: ["nginx","-g","daemon off;"]
        ports:
        - containerPort: 80
        
[root@szdc0-21 ~]# kubectl apply -f nginx-ds.yaml
[root@szdc0-21 ~]# kubectl get pods -o wide
NAME             READY   STATUS    RESTARTS   AGE   IP           NODE                NOMINATED NODE   READINESS GATES
nginx-ds-5782b   1/1     Running   0          5s    172.6.22.2   szdc0-22.host.com   <none>           <none>
nginx-ds-lbxlb   1/1     Running   0          5s    172.6.21.2   szdc0-21.host.com   <none>           <none>

# Exec into one container and curl the other pod's IP; in a second terminal, tail that pod's logs
[root@szdc0-21 ~]# kubectl exec -it nginx-ds-lbxlb /bin/bash
root@nginx-ds-lbxlb:/# curl 172.6.22.2

[root@szdc0-22 ~]# kubectl logs -f nginx-ds-5782b
10.0.0.21 - - [24/Apr/2021:12:07:24 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.38.0" "-" # before the optimization: the host IP
172.6.21.2 - - [24/Apr/2021:12:10:39 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.38.0" "-" # after the optimization: the pod IP

十二、Deploying the service-discovery plugin: coredns

12.1、Serving the cluster's resource manifests over HTTP

On the ops host, configure an nginx virtual host that provides a unified access point for the cluster's resource manifests

1) Configure nginx

[root@szdc0-200 ~]# vim /etc/nginx/conf.d/k8s-yaml.od.com.conf
server {
    listen 80;
    server_name k8s-yaml.od.com;

    location / {
        autoindex on;
        default_type text/plain;
        root /data/k8s-yaml;
   }
}
[root@szdc0-200 ~]# mkdir -p /data/k8s-yaml/coredns
[root@szdc0-200 ~]# nginx -t
[root@szdc0-200 ~]# nginx -s reload

From now on, all resource manifests are simply placed under /data/k8s-yaml on the ops host

2) Configure internal DNS resolution

# Note: roll the zone serial number forward
[root@szdc0-11 ~]# vim /var/named/od.com.zone
k8s-yaml           A    10.0.0.200
[root@szdc0-11 ~]# named-checkconf 
[root@szdc0-11 ~]# systemctl restart named
[root@szdc0-11 ~]# dig -t A k8s-yaml.od.com @10.0.0.11 +short
10.0.0.200

12.2、Deploying coredns

CoreDNS official GitHub: https://github.com/coredns/coredns

CoreDNS official Docker Hub: https://hub.docker.com/r/coredns/coredns

1) Prepare the coredns image

[root@szdc0-200 ~]# docker pull coredns/coredns:1.6.1
[root@szdc0-200 ~]# docker images|grep coredns
coredns/coredns                 1.6.1                      c0f6e815079e   21 months ago   42.2MB
[root@szdc0-200 ~]# docker tag c0f6e815079e harbor.od.com/public/coredns:v1.6.1
[root@szdc0-200 ~]# docker push harbor.od.com/public/coredns:v1.6.1

2) Prepare the resource manifests

# Create the manifest directory
[root@szdc0-200 ~]# mkdir -p /data/k8s-yaml/coredns && cd /data/k8s-yaml/coredns/

RBAC manifest: rbac.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system

ConfigMap manifest: cm.yaml

apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        log
        health
        ready
        kubernetes cluster.local 10.96.0.0/22
        forward . 10.0.0.11
        cache 30
        loop
        reload
        loadbalance
       }

Deployment manifest: dp.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      containers:
      - name: coredns
        image: harbor.od.com/public/coredns:v1.6.1
        args:
        - -conf
        - /etc/coredns/Corefile
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile

Service manifest: svc.yaml

apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: 10.96.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
  - name: metrics
    port: 9153
    protocol: TCP

3) Apply the manifests in order

[root@szdc0-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/rbac.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
[root@szdc0-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/cm.yaml
configmap/coredns created
[root@szdc0-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/dp.yaml
deployment.apps/coredns created
[root@szdc0-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/svc.yaml
service/coredns created

4) Verify

[root@szdc0-21 ~]# kubectl get all -n kube-system
NAME                           READY   STATUS    RESTARTS   AGE
pod/coredns-6b6c4f9648-c6mdg   1/1     Running   0          59s


NAME              TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
service/coredns   ClusterIP   10.96.0.2    <none>        53/UDP,53/TCP,9153/TCP   51s


NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/coredns   1/1     1            1           59s

NAME                                 DESIRED   CURRENT   READY   AGE
replicaset.apps/coredns-6b6c4f9648   1         1         1       59s

[root@szdc0-21 ~]# kubectl create deployment nginx-dp --image=harbor.od.com/public/nginx:v1.7.9 -n kube-public
deployment.apps/nginx-dp created
[root@szdc0-21 ~]# kubectl expose deployment nginx-dp --port=80 -n kube-public
service/nginx-dp exposed
[root@szdc0-21 ~]# kubectl get service -n kube-public
NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
nginx-dp   ClusterIP   10.96.3.94   <none>        80/TCP    9s
[root@szdc0-21 ~]# dig -t A nginx-dp.kube-public.svc.cluster.local. @10.96.0.2 +short
10.96.3.94
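
Resolution can also be checked from inside a pod; the kubelet injects the cluster DNS from its --cluster-dns flag into every pod (a sketch; <nginx-dp-pod> is a placeholder for the actual pod name from kubectl get pods -n kube-public):

kubectl -n kube-public exec -it <nginx-dp-pod> -- cat /etc/resolv.conf
# expect: nameserver 10.96.0.2, plus search domains such as kube-public.svc.cluster.local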

十三、Deploying the service-exposure plugin: traefik

Kubernetes DNS gives services automatic discovery inside the cluster; how, then, can a service be used and accessed from outside the cluster?

  • A NodePort-type Service: forgoes kube-proxy's ipvs mode and works only with the iptables mode
  • An Ingress resource: Ingress can only schedule and expose layer-7 applications, specifically the HTTP and HTTPS protocols

traefik official GitHub: https://github.com/traefik/traefik

traefik official Docker Hub: https://hub.docker.com/_/traefik

1) Prepare the traefik image

[root@szdc0-200 ~]# docker pull traefik:v1.7.2-alpine
[root@szdc0-200 ~]# docker images|grep traefik
traefik                         v1.7.2-alpine              add5fac61ae5   2 years ago     72.4MB
[root@szdc0-200 ~]# docker tag add5fac61ae5 harbor.od.com/public/traefik:v1.7.2
[root@szdc0-200 ~]# docker push harbor.od.com/public/traefik:v1.7.2

2) Prepare the resource manifests

# Create the manifest directory
[root@szdc0-200 ~]# mkdir -p /data/k8s-yaml/traefik/ && cd /data/k8s-yaml/traefik/

rbac.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: traefik-ingress-controller
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system

ds.yaml

apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: traefik-ingress
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress
spec:
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress
        name: traefik-ingress
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: harbor.od.com/public/traefik:v1.7.2
        name: traefik-ingress
        ports:
        - name: controller
          containerPort: 80
          hostPort: 81
        - name: admin-web
          containerPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
        - --insecureskipverify=true
        - --kubernetes.endpoint=https://10.0.0.10:7443
        - --accesslog
        - --accesslog.filepath=/var/log/traefik_access.log
        - --traefiklog
        - --traefiklog.filepath=/var/log/traefik.log
        - --metrics.prometheus

svc.yaml

kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress
  ports:
    - protocol: TCP
      port: 80
      name: controller
    - protocol: TCP
      port: 8080
      name: admin-web

ingress.yaml

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: traefik-web-ui
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: traefik.od.com
    http:
      paths:
      - path: /
        backend:
          serviceName: traefik-ingress-service
          servicePort: 8080

3) Apply the manifests in order

[root@szdc0-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/rbac.yaml
[root@szdc0-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/ds.yaml
[root@szdc0-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/svc.yaml
[root@szdc0-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/ingress.yaml

[root@szdc0-21 ~]# kubectl get pods  -n kube-system -o wide
NAME                       READY   STATUS    RESTARTS   AGE     IP           NODE                NOMINATED NODE   READINESS GATES
coredns-6b6c4f9648-c6mdg   1/1     Running   0          39m     172.6.21.2   szdc0-21.host.com   <none>           <none>
traefik-ingress-9fjk7      1/1     Running   0          3m37s   172.6.22.3   szdc0-22.host.com   <none>           <none>
traefik-ingress-b9btp      1/1     Running   0          3m37s   172.6.21.3   szdc0-21.host.com   <none>           <none>
[root@szdc0-21 ~]# kubectl get svc  -n kube-system -o wide
NAME                      TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)                  AGE     SELECTOR
coredns                   ClusterIP   10.96.0.2     <none>        53/UDP,53/TCP,9153/TCP   39m     k8s-app=coredns
traefik-ingress-service   ClusterIP   10.96.1.192   <none>        80/TCP,8080/TCP          3m45s   k8s-app=traefik-ingress

[root@szdc0-21 ~]# kubectl get all -n kube-system 
NAME                           READY   STATUS    RESTARTS   AGE
pod/coredns-6b6c4f9648-c6mdg   1/1     Running   0          45m
pod/traefik-ingress-9fjk7      1/1     Running   0          9m58s
pod/traefik-ingress-b9btp      1/1     Running   0          9m58s


NAME                              TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)                  AGE
service/coredns                   ClusterIP   10.96.0.2     <none>        53/UDP,53/TCP,9153/TCP   45m
service/traefik-ingress-service   ClusterIP   10.96.1.192   <none>        80/TCP,8080/TCP          9m50s

NAME                             DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/traefik-ingress   2         2         2       2            2           <none>          9m58s

NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/coredns   1/1     1            1           45m

NAME                                 DESIRED   CURRENT   READY   AGE
replicaset.apps/coredns-6b6c4f9648   1         1         1       45m

4) Add the DNS record

# Note: roll the zone serial number forward
[root@szdc0-11 ~]# vim /var/named/od.com.zone
traefik            A    10.0.0.10
[root@szdc0-11 ~]# named-checkconf 
[root@szdc0-11 ~]# systemctl restart named

5) Configure the reverse proxy

This needs to be configured on both szdc0-11 and szdc0-12:

[root@szdc0-11 ~]# vim /etc/nginx/conf.d/od.com.conf
upstream default_backend_traefik {
    server 10.0.0.21:81    max_fails=3 fail_timeout=10s;
    server 10.0.0.22:81    max_fails=3 fail_timeout=10s;
}
server {
    server_name *.od.com;

    location / {
        proxy_pass http://default_backend_traefik;
        proxy_set_header Host       $http_host;
        proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
    }
}
[root@szdc0-11 ~]# nginx -t
[root@szdc0-11 ~]# nginx -s reload
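
Before reaching for a browser, the whole chain (VIP -> nginx -> traefik) can be checked with curl from any host (a sketch; the Host header selects the *.od.com server block):

curl -I -H "Host: traefik.od.com" http://10.0.0.10/    # expect an HTTP response from the traefik web UI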

6) Access in a browser

Open http://traefik.od.com/

image-20210425142906322

十四、Deploying the GUI resource-management plugin: dashboard

GitHub: https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dashboard

Download: https://github.com/kubernetes/dashboard/releases

Docker Hub: https://registry.hub.docker.com/r/kubernetesui/dashboard/

1) Prepare the dashboard images

[root@szdc0-200 ~]# docker pull sunrisenan/kubernetes-dashboard-amd64:v1.10.1
[root@szdc0-200 ~]# docker pull sunrisenan/kubernetes-dashboard-amd64:v1.8.3
[root@szdc0-200 ~]# docker images |grep dash
sunrisenan/kubernetes-dashboard-amd64   v1.10.1                    f9aed6605b81   2 years ago     122MB
sunrisenan/kubernetes-dashboard-amd64   v1.8.3                     0c60bcf89900   3 years ago     102MB
[root@szdc0-200 ~]# docker tag f9aed6605b81  harbor.od.com/public/kubernetes-dashboard-amd64:v1.10.1
[root@szdc0-200 ~]# docker push harbor.od.com/public/kubernetes-dashboard-amd64:v1.10.1
[root@szdc0-200 ~]# docker tag 0c60bcf89900 harbor.od.com/public/kubernetes-dashboard-amd64:v1.8.3
[root@szdc0-200 ~]# docker push harbor.od.com/public/kubernetes-dashboard-amd64:v1.8.3

2) Prepare the resource manifests

# Create the directory
[root@szdc0-200 ~]# mkdir -p /data/k8s-yaml/dashboard && cd /data/k8s-yaml/dashboard

rbac.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard-admin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-admin
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard-admin
  namespace: kube-system

dp.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: kubernetes-dashboard
        image: harbor.od.com/public/kubernetes-dashboard-amd64:v1.8.3
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 50m
            memory: 100Mi
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          # PLATFORM-SPECIFIC ARGS HERE
          - --auto-generate-certificates
        volumeMounts:
        - name: tmp-volume
          mountPath: /tmp
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard-admin
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"

svc.yaml

apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 443
    targetPort: 8443

ingress.yaml

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: dashboard.od.com
    http:
      paths:
      - backend:
          serviceName: kubernetes-dashboard
          servicePort: 443

3) Apply the resources in order

Open http://k8s-yaml.od.com/dashboard/ in a browser and check that the resource manifest files are being served correctly.

image-20210425145654537

[root@szdc0-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/rbac.yaml
[root@szdc0-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/dp.yaml
[root@szdc0-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/svc.yaml
[root@szdc0-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/ingress.yaml
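
A quick way to confirm everything was created (a sketch; the pod name suffix and ages will differ in your environment):

[root@szdc0-21 ~]# kubectl get pods -n kube-system -l k8s-app=kubernetes-dashboard
[root@szdc0-21 ~]# kubectl get svc,ingress -n kube-system | grep kubernetes-dashboard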

4) Add the DNS record

# Remember to roll the zone's serial number forward
[root@szdc0-11 ~]# vim /var/named/od.com.zone
dashboard           A    10.0.0.10
[root@szdc0-11 ~]# systemctl restart named
[root@szdc0-11 ~]# dig dashboard.od.com @10.96.0.2 +short
10.0.0.10
[root@szdc0-11 ~]# dig dashboard.od.com @10.0.0.11 +short
10.0.0.10

5) Access from a browser

Open http://dashboard.od.com in a browser.

image-20210425151301579

6) Configure HTTPS

Issue a certificate

[root@szdc0-200 certs]# (umask 077; openssl genrsa -out dashboard.od.com.key 2048)
Generating RSA private key, 2048 bit long modulus
...........................................................+++
.........................................................................+++
e is 65537 (0x10001)
[root@szdc0-200 certs]# openssl req -new -key dashboard.od.com.key -out dashboard.od.com.csr -subj "/CN=dashboard.od.com/C=CN/ST=BJ/L=Beijing/O=OldboyEdu/OU=ops"
[root@szdc0-200 certs]# openssl x509 -req -in dashboard.od.com.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out dashboard.od.com.crt -days 3650
Signature ok
subject=/CN=dashboard.od.com/C=CN/ST=BJ/L=Beijing/O=OldboyEdu/OU=ops
Getting CA Private Key
[root@szdc0-200 certs]# ll dash*
-rw-r--r-- 1 root root 1196 Apr 25 15:15 dashboard.od.com.crt
-rw-r--r-- 1 root root 1005 Apr 25 15:14 dashboard.od.com.csr
-rw------- 1 root root 1675 Apr 25 15:14 dashboard.od.com.key

Inspect the certificate

[root@szdc0-200 certs]# cfssl-certinfo -cert dashboard.od.com.crt 
{
  "subject": {
    "common_name": "dashboard.od.com",
    "country": "CN",
    "organization": "OldboyEdu",
    "organizational_unit": "ops",
    "locality": "Beijing",
    "province": "BJ",
    "names": [
      "dashboard.od.com",
      "CN",
      "BJ",
      "Beijing",
      "OldboyEdu",
      "ops"
    ]
  },
  "issuer": {
    "common_name": "OldboyEdu",
    "country": "CN",
    "organization": "od",
    "organizational_unit": "ops",
    "locality": "beijing",
    "province": "beijing",
    "names": [
      "CN",
      "beijing",
      "beijing",
      "od",
      "ops",
      "OldboyEdu"
    ]
  },
  "serial_number": "10408511385517215812",
  "not_before": "2021-04-25T07:15:06Z",
  "not_after": "2031-04-23T07:15:06Z",
  "sigalg": "SHA256WithRSA",
  "authority_key_id": "",
  "subject_key_id": "",
  "pem": "-----BEGIN CERTIFICATE-----
MIIDRTCCAi0CCQCQcnX1XlzQRDANBgkqhkiG9w0BAQsFADBgMQswCQYDVQQGEwJD
TjEQMA4GA1UECBMHYmVpamluZzEQMA4GA1UEBxMHYmVpamluZzELMAkGA1UEChMC
b2QxDDAKBgNVBAsTA29wczESMBAGA1UEAxMJT2xkYm95RWR1MB4XDTIxMDQyNTA3
MTUwNloXDTMxMDQyMzA3MTUwNlowaTEZMBcGA1UEAwwQZGFzaGJvYXJkLm9kLmNv
bTELMAkGA1UEBhMCQ04xCzAJBgNVBAgMAkJKMRAwDgYDVQQHDAdCZWlqaW5nMRIw
EAYDVQQKDAlPbGRib3lFZHUxDDAKBgNVBAsMA29wczCCASIwDQYJKoZIhvcNAQEB
BQADggEPADCCAQoCggEBAMmpin99sFKUmqE4g9Apvcxo4sJuvl5U0EtYpYGr/oYe
K9h31cuYb4gzTgiOXJ2kuq83agw/qcj/vWwvu5CJrtAf5xHSsxhTMamvstEjgGOW
WKQwhsIT1Pqx3BcRYP232mnAeXFKd9pug7dzQLu4/n3swtTuXOo9zsNhapI1UeYB
gbZSyURbdeu4/O4McC3dSaCCon3hIHy7G3DImttvYmZgBgYsvN5uOk2AVz3/PSO4
WGCfNEX8lamrGyc19x9m8UQS8dsmmY2dVOGMA+L8eeiOC+h1P3qmrOdW25ZpzY9s
SjL/eq79htJd1ITpr8BBTgH2Fms7QMoN/hJKBg4uI2MCAwEAATANBgkqhkiG9w0B
AQsFAAOCAQEAQk2BbPf4SOXM9XgLPD4pBtcvEd0W7aoLmGaLiWgUUs0upPpVeeTM
AT951IDmxSToJsq6BBPFi8KdpoZ88jB35CB6QoTLhoA39jRoRly/l++oCyniKrgA
rlmfWLeTUE99xdblrqOvYREE1FZwc+S8RfjeQg6OdIX3v3lYP4PBbF6r874DORIl
suirgXEJ4NdeuqtMP4j0hr8QN849/tBv6ID492GE0e/SKT2PTh2A74ODxE7jcZpp
1FKN/OvrlVw3C9zohkgccZDkQE7YQu/YoyqwvlOIu/2hlt3cpZc2vtdqRlgyI2qG
Qwb62Pvjkf6cTWnGqZdoNKK6DoFeCWI5TQ==
-----END CERTIFICATE-----
"
}

Configure nginx

# On szdc0-11 and szdc0-12
~]# mkdir /etc/nginx/certs
~]# scp szdc0-200:/opt/certs/dashboard.od.com.crt /etc/nginx/certs/
~]# scp szdc0-200:/opt/certs/dashboard.od.com.key /etc/nginx/certs/

~]# vim /etc/nginx/conf.d/dashboard.od.com.conf
server {
    listen       80;
    server_name  dashboard.od.com;

    rewrite ^(.*)$ https://${server_name}$1 permanent;
}
server {
    listen       443 ssl;
    server_name  dashboard.od.com;

    ssl_certificate "certs/dashboard.od.com.crt";
    ssl_certificate_key "certs/dashboard.od.com.key";
    ssl_session_cache shared:SSL:1m;
    ssl_session_timeout  10m;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_prefer_server_ciphers on;

    location / {
        proxy_pass http://default_backend_traefik;
        proxy_set_header Host       $http_host;
        proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
    }
}

[root@szdc0-11 ~]# nginx -t
[root@szdc0-11 ~]# nginx -s reload
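
You can verify the redirect and TLS termination before opening the browser. A minimal check (-k is needed because the certificate is signed by the self-built CA, which curl does not trust):

[root@szdc0-11 ~]# curl -sI http://dashboard.od.com/ | head -1    # expect HTTP/1.1 301
[root@szdc0-11 ~]# curl -kI https://dashboard.od.com/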

7) Retrieve the kubernetes-dashboard-admin token

[root@szdc0-21 ~]# kubectl get secrets -n kube-system 
NAME                                     TYPE                                  DATA   AGE
coredns-token-n8k78                      kubernetes.io/service-account-token   3      5h9m
default-token-btk2d                      kubernetes.io/service-account-token   3      23h
kubernetes-dashboard-admin-token-h4g65   kubernetes.io/service-account-token   3      32m
kubernetes-dashboard-key-holder          Opaque                                2      32m
traefik-ingress-controller-token-xpklk   kubernetes.io/service-account-token   3      3h52m
[root@szdc0-21 ~]# kubectl describe secret kubernetes-dashboard-admin-token-h4g65 -n kube-system | grep token
Name:         kubernetes-dashboard-admin-token-h4g65
Type:  kubernetes.io/service-account-token
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbi10b2tlbi1oNGc2NSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImRjMWFiMGZmLWQ2NDMtNDE4Mi04NzRjLTM2YmJmMWNmNjJlZSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTprdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbiJ9.HKz4jTjq7E6fkOxgNGeh7qx29zq8cF7iDQzTcC_M_6yfAJ8n0Fd7n9F0VAmQ2qc9jjKaMLeWG-NnGW4EhZ7njxAllxnXT-WVba0RvlyBl16C_X_oaxtBqcC4x3pkx1rfihWL_kFmQvedvttNbr_SdlP3DpER3jKUaVwjw0EfsmM4bchmbXILXYVM8y2FtQrguo6ZqxG7w-dMby_5VBkMCgF5bOEhwUZStwPCNccFFoa8MduqBGXIpunCzYqduy3QiJdN9xbCFQADomDD90tc9lncHUOyR3mvdGGBNjxYM2r1i_AaI__Pe_KAKhNkqMFq8hfez2P17n5x9Rzb8O_rbg
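
Instead of copying the token out of the describe output, it can be extracted directly. A one-liner sketch (assuming bash and exactly one matching secret):

[root@szdc0-21 ~]# kubectl -n kube-system get secret $(kubectl -n kube-system get secret | awk '/kubernetes-dashboard-admin-token/{print $1}') -o jsonpath='{.data.token}' | base64 -d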

8) Log in with the token

image-20210425153543591

image-20210425153638098

十五、Deploy heapster

1) Prepare the heapster image

[root@szdc0-200 ~]# docker pull sunrisenan/heapster:v1.5.4
[root@szdc0-200 ~]# docker images|grep heapster
sunrisenan/heapster                               v1.5.4                     c359b95ad38b   2 years ago     136MB
[root@szdc0-200 ~]# docker tag c359b95ad38b harbor.od.com/public/heapster:v1.5.4
[root@szdc0-200 ~]# docker push harbor.od.com/public/heapster:v1.5.4

2) Prepare the resource manifests

[root@szdc0-200 ~]# mkdir -p /data/k8s-yaml/heapster && cd /data/k8s-yaml/heapster

rbac.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: heapster
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:heapster
subjects:
- kind: ServiceAccount
  name: heapster
  namespace: kube-system

dp.yaml

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: heapster
    spec:
      serviceAccountName: heapster
      containers:
      - name: heapster
        image: harbor.od.com/public/heapster:v1.5.4
        imagePullPolicy: IfNotPresent
        command:
        - /opt/bitnami/heapster/bin/heapster
        - --source=kubernetes:https://kubernetes.default
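
The --source flag points heapster at the apiserver via the in-cluster kubernetes.default service name; it authenticates with the ServiceAccount token mounted into the pod, which is why the ClusterRoleBinding above is required.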

svc.yaml

apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: Heapster
  name: heapster
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082
  selector:
    k8s-app: heapster

3) Apply the resource manifests

[root@szdc0-21 ~]# kubectl apply -f http://k8s-yaml.od.com/heapster/rbac.yaml
[root@szdc0-21 ~]# kubectl apply -f http://k8s-yaml.od.com/heapster/dp.yaml
[root@szdc0-21 ~]# kubectl apply -f http://k8s-yaml.od.com/heapster/svc.yaml
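
Heapster needs a minute or two to scrape its first metrics, so kubectl top may report that metrics are not yet available right after the deployment. To check that the pod is running (a sketch; the name suffix will vary):

[root@szdc0-21 ~]# kubectl get pods -n kube-system -l k8s-app=heapster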

4) Verify

image-20210425154820200

[root@szdc0-21 ~]# kubectl top node
NAME                CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
szdc0-21.host.com   97m          4%     756Mi           19%       
szdc0-22.host.com   81m          4%     643Mi           16%       
[root@szdc0-21 ~]# kubectl top pod -n kube-public 
NAME                        CPU(cores)   MEMORY(bytes)   
nginx-dp-5dfc689474-5qjts   0m           1Mi 
Author: Lawrence

Original article: https://www.cnblogs.com/hujinzhong/p/14700673.html