kubeadm 安装 Kubernetes(Ubuntu 18.04)

一,ubuntu 1804 kubeadm 安装

1,系统初始化,环境预设

1,关闭swap,firewalld,selinux
swapoff  -a 
vi /etc/fstab   注释掉 swap  后  mount  -a  

ufw  status   
ubuntu默认不安装selinux 
apt  install  selinux-utils   #安装后可用 getenforce 命令查看selinux状态

apt-get  install  -y openssh-server   允许root 远程访问
vim  /etc/ssh/sshd_config
PermitRootLogin yes


2,设置主机名(所有修改)
vim  /etc/hosts  
172.20.7.132   k8s-master
172.20.7.133   k8s-node1
hostnamectl  set-hostname k8s-master

3,配置阿里云apt 更新源

cat >  /etc/apt/sources.list  <<EOF
deb http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse

deb http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse

deb http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse

deb http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse

deb http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse
EOF

apt-get  update  

重启机器:

4,安装docker-ce 
参考:https://yq.aliyun.com/articles/658984

如果已安装老版本docker-ce  删除之
sudo apt-get remove docker docker-engine docker.io

安装docker-ce 
##docker会与kubelet kubeadm kubectl 等匹配
apt-get update  

sudo apt-get install \
    apt-transport-https \
    ca-certificates \
    curl \
    software-properties-common  
  
  
官方docker-ce 源
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -

sudo apt-key fingerprint 0EBFCD88

设定稳定存储库
sudo add-apt-repository \
   "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
   $(lsb_release -cs) \
   stable"
   
 更新源
 sudo apt-get update
 
 查看可用版本docker-ce 
 apt-cache madison docker-ce
 
 安装最新docker-ce 
 sudo apt-get install docker-ce
 安装指定版本docker-ce 
 sudo apt-get install docker-ce=<VERSION>
 
 docker  -v
 
添加普通用户至docker组
sudo groupadd docker     #添加docker用户组
sudo gpasswd -a $USER docker     #将登陆用户加入到docker用户组中
newgrp docker     #更新用户组
docker ps    #测试docker命令是否可以使用sudo正常使用

 
设定加速器:
  sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
 "registry-mirrors": ["https://9916w1ow.mirror.aliyuncs.com"]
}
EOF


# sudo systemctl daemon-reload
# sudo systemctl restart docker
# systemctl    enable    docker


5,设置阿里云k8s 源
apt-get update && apt-get install -y apt-transport-https

curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg  | apt-key add -
 
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
#上面的 EOF 必须单独成行(手动输入),行尾不能带注释,否则 heredoc 不会结束

apt-get update

apt-get install kubeadm=1.15.0-00 kubelet=1.15.0-00 kubectl=1.15.0-00
 systemctl  status  kubelet.service
 systemctl  enable   kubelet.service
 systemctl  start    kubelet.service

查看需要哪些镜像
kubeadm config images list --kubernetes-version v1.15.0

k8s.gcr.io/kube-apiserver:v1.15.0
k8s.gcr.io/kube-controller-manager:v1.15.0
k8s.gcr.io/kube-scheduler:v1.15.0
k8s.gcr.io/kube-proxy:v1.15.0
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.3.10
k8s.gcr.io/coredns:1.3.1


拉取镜像
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.15.0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.15.0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.15.0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.15.0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.10
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.3.1


#x86_64 主机应使用 amd64 镜像(原文 s390x 为笔误),且重新打 tag 时前后版本必须一致
docker  pull      quay-mirror.qiniu.com/coreos/flannel:v0.11.0-amd64
docker  tag  quay-mirror.qiniu.com/coreos/flannel:v0.11.0-amd64   quay.io/coreos/flannel:v0.11.0-amd64


重新打标签
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.15.0    k8s.gcr.io/kube-apiserver:v1.15.0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.15.0   k8s.gcr.io/kube-controller-manager:v1.15.0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.15.0  k8s.gcr.io/kube-scheduler:v1.15.0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.15.0  k8s.gcr.io/kube-proxy:v1.15.0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1  k8s.gcr.io/pause:3.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.10  k8s.gcr.io/etcd:3.3.10
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.3.1  k8s.gcr.io/coredns:1.3.1



初始化kubeadm :
在k8s-master 操作,ip为本机ip ,最好只用一个网卡,如有多个,最好以第一个网卡ip地址为初始化通信地址

#kubeadm init --apiserver-advertise-address=172.20.7.132 --apiserver-bind-port=6443 --kubernetes-version=v1.15.0  --pod-network-cidr=10.10.0.0/16 --service-cidr=10.20.0.0/16   --ignore-preflight-errors=swap

sudo kubeadm init --kubernetes-version=v1.15.0 --pod-network-cidr 10.244.0.0/16



初始化正常后,会跳出node节点加入主节点的命令,如
kubeadm join 172.20.7.132:6443 --token yqj5ns.fd4tit6gg29q2l9i     --discovery-token-ca-cert-hash sha256:771b83763112ff988fd1c146aa94d3586c8883950c42813c2ef7839d3e4269d1

在主节点操作:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config


安装flannel插件
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
##注意:kubectl 不要加 sudo,否则 root 环境下读不到当前用户 $HOME/.kube/config
##有时候能成功,有时候不能,可能网络问题
 

在k8s-node 节点操作
apt-get  install  openssh-server
systemctl   start  sshd 
允许root远程访问
vi  /etc/ssh/sshd_config
systemctl   restart  sshd
ufw    status

swapoff   -a 
vi  /etc/fstab   注释掉swap
mount -a

vim  /etc/apt/sources.list  配置阿里云的apt源
apt-get  update 
apt-get -y install apt-transport-https ca-certificates curl software-properties-common
curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg  | sudo apt-key add -

vim  /etc/hosts 添加主机名
hostnamectl  set-hostname k8s-node1

reboot

安装docker-ce 
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
apt-cache madison docker-ce
apt-get install docker-ce

添加加速器镜像源
systemctl daemon-reload
systemctl restart docker

k8s 源

curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg  | apt-key add -

apt-get update
vim  /etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial   main

apt-get update

apt-get install kubeadm=1.15.0-00 kubelet=1.15.0-00 kubectl=1.15.0-00


kubeadm config images list --kubernetes-version v1.15.0


拉取镜像
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.15.0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.15.0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.15.0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.15.0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.10
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.3.1


重新打标签
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.15.0    k8s.gcr.io/kube-apiserver:v1.15.0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.15.0   k8s.gcr.io/kube-controller-manager:v1.15.0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.15.0  k8s.gcr.io/kube-scheduler:v1.15.0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.15.0  k8s.gcr.io/kube-proxy:v1.15.0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1  k8s.gcr.io/pause:3.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.10  k8s.gcr.io/etcd:3.3.10
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.3.1  k8s.gcr.io/coredns:1.3.1


node 节点加入主节点
kubeadm join 172.20.7.132:6443 --token yqj5ns.fd4tit6gg29q2l9i     --discovery-token-ca-cert-hash sha256:771b83763112ff988fd1c146aa94d3586c8883950c42813c2ef7839d3e4269d1


systemctl   restart   docker 
systemctl   restart   kubelet.service 
systemctl   enable    kubelet.service



在主节点执行

kubectl   get  nodes  看节点是否ready

等待一段时间

在主节点操作 安装插件 flannel
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

kubectl  get  po  -o wide


看pod 是否启动正常


在所有节点操作
iptables -P FORWARD ACCEPT

cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
EOF


sysctl --system


测试,服务是否自动发现
在主节点操作:
vim tt-nginx.yml 

apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  containers:
  - name: nginx
    image: nginx
    ports:
    - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
  selector:
    app: nginx
    
    
 执行 
 kubectl  apply  -f   tt-nginx.yml  
 
 kubectl   get  po -o wide  看服务是否正常启动
 kubectl   get  svc  -o wide   看service 
 

测试 nginx 是否正常
curl  pod-IP:80
curl   nginx-service-集群-ip:80  

另起pod
cat  curl-utils.yml 
apiVersion: v1
kind: Pod
metadata:
   name: curl-util
spec:
   containers:
   - name: curl-util
     image: radial/busyboxplus:curl
     command: 
     - sh 
     - -c 
     - while true ; do sleep 1 ;done 
 
 kubectl  apply -f   curl-utils.yml 
 
 看是否正常解析dns 解析service nginx
 kubectl  exec  curl-util  --  nslookup  nginx 
 
 
进入pod 进行查看
kubectl  exec  curl-util  --   curl  -s  nginx:80
kubectl  exec  -it    curl-util   /bin/sh
curl  -s nginx 

注意:系统重启后服务失败;必须操作

swapoff -a

 cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
EOF

sysctl --system

不然服务自动发现失败



操作步骤历史

操作步骤:
 1  ll
    2  apt-get remove docker docker-engine docker.io
    3  sudo apt-get update
    4  swapoff   -a
    5  vi /etc/fstab 
    6  mount -a
    7  swapoff   -a
    8  apt-get update
    9  vi  /etc/apt/sources.list
   10  apt  install  vim
   11  vim  /etc/apt/sources.list
   12  apt-get -y install apt-transport-https ca-certifi  cates curl software-properties-common
   13  apt install curl
   14  curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg  | sudo apt-key add -
   15  ip a s
   16  vim  /etc/hosts
   17  hostnamectl  set-hostname k8s-master
   18  vim  /etc/hosts
   19  reboot
   20  apt-get  install  -y openssh-server
   21  systemctl  start  sshd
   22  systemctl  status  sshd
   23  systemctl  enable  sshd
   24  systemctl      --help
   25  service   --help
   26  vim  /etc/ssh/sshd_config .
   27  vim  /etc/ssh/sshd_config 
   28  vi  /etc/ssh/sshd_config 
   29  systemctl   restart   sshd
   30  cat  /etc/hosts
   31  ping  k8s-node1
   32  sudo apt-get update
   33  curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
   34  apt-key fingerprint 0EBFCD88
   35  apt-cache madison docker-ce
   36  apt-get install docker-ce
   37  systemctl daemon-reload
   38  systemctl restart docker
   39  systemctl    status  docker
   40  systemctl    enable    docker
   41  curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg  | apt-key add -
   42  apt-get update
   43  vim  /etc/apt/sources.list.d/kubernetes.list
   44  apt-get update
   45  apt-get install kubeadm=1.15.0-00 kubelet=1.15.0-00 kubectl=1.15.0-00
   46  ll
   47  ip a s
   48  kubeadm init --apiserver-advertiseaddress=172.20.7.132 --apiserver-bind-port=6443 --kubernetes-version=v1.15 \ 
   49  --pod-network-cidr=10.10.0.0/16 --service-cidr=10.20.0.0/16 --service-dnsdomain=linux36.local \ 
   50  kubeadm init --apiserver-advertiseaddress=172.20.7.132 --apiserver-bind-port=6443 --kubernetes-version=v1.15  --pod-network-cidr=10.10.0.0/16 --service-cidr=10.20.0.0/16 --service-dnsdomain=linux36.local --image-repository=registry.cnhangzhou.aliyuncs.com/google_containers  --ignore-preflight-errors=swap
   51  kubeadm init --apiserver-advertiseaddress 172.20.7.132 --apiserver-bind-port=6443 --kubernetes-version=v1.15  --pod-network-cidr=10.10.0.0/16 --service-cidr=10.20.0.0/16 --service-dnsdomain=linux36.local --image-repository=registry.cnhangzhou.aliyuncs.com/google_containers  --ignore-preflight-errors=swap
   52  kubeadm init --apiserver-advertise-address=172.20.7.132 --apiserver-bind-port=6443 --kubernetes-version=v1.15  --pod-network-cidr=10.10.0.0/16 --service-cidr=10.20.0.0/16 --service-dnsdomain=linux36.local --image-repository=registry.cnhangzhou.aliyuncs.com/google_containers  --ignore-preflight-errors=swap
   53  kubeadm init --apiserver-advertise-address=172.20.7.132 --apiserver-bind-port=6443 --kubernetes-version=v1.15  --pod-network-cidr=10.10.0.0/16 --service-cidr=10.20.0.0/16   --image-repository=registry.cnhangzhou.aliyuncs.com/google_containers  --ignore-preflight-errors=swap
   54  kubeadm init --apiserver-advertise-address=172.20.7.132 --apiserver-bind-port=6443   --pod-network-cidr=10.10.0.0/16 --service-cidr=10.20.0.0/16   --image-repository=registry.cnhangzhou.aliyuncs.com/google_containers  --ignore-preflight-errors=swap
   55  kubeadm config images pull
   56  kubeadm config images list --kubernetes-version v1.15.0
   57  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.15.0
   58  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.15.0
   59  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.15.0
   60  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.15.0
   61  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
   62  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.10
   63  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.3.1
   64  docker   images 
   65  kubeadm init --apiserver-advertise-address=172.20.7.132 --apiserver-bind-port=6443 --kubernetes-version=v1.15  --pod-network-cidr=10.10.0.0/16 --service-cidr=10.20.0.0/16 --service-dnsdomain=linux36.local --image-repository=registry.cnhangzhou.aliyuncs.com/google_containers  --ignore-preflight-errors=swap
   66  kubeadm init --apiserver-advertise-address=172.20.7.132 --apiserver-bind-port=6443 --kubernetes-version=v1.15  --pod-network-cidr=10.10.0.0/16 --service-cidr=10.20.0.0/16  --image-repository=registry.cnhangzhou.aliyuncs.com/google_containers  --ignore-preflight-errors=swap
   67  kubeadm init --apiserver-advertise-address=172.20.7.132 --apiserver-bind-port=6443   --pod-network-cidr=10.10.0.0/16 --service-cidr=10.20.0.0/16  --image-repository=registry.cnhangzhou.aliyuncs.com/google_containers  --ignore-preflight-errors=swap
   68  docker  iamges 
   69  docker  images 
   70  docker  images   |  wc -l 
   71  docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.15.0    k8s.gcr.io/kube-apiserver:v1.15.0
   72  docker  images   |  wc -l 
   73  docker  images   
   74  kubeadm init --apiserver-advertise-address=172.20.7.132 --apiserver-bind-port=6443 --kubernetes-version=v1.15.0  --pod-network-cidr=10.10.0.0/16 --service-cidr=10.20.0.0/16   --ignore-preflight-errors=swap
   75  kubectl  get  nodes 
   76  kubectl  get  node 
   77  mkdir -p $HOME/.kube
   78  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
   79  sudo chown $(id -u):$(id -g) $HOME/.kube/config
   80  kubectl  get  nodes 
   81  docker  ps -a 
   82  kubectl  get po -o wide 
   83  kubectl  get  nodes 
   84  systemctl   restart  docker 
   85  systemctl   restart   kubelet.service  
   86  kubectl  get  nodes 
   87  ping k8s-node1 
   88  ll
   89  kubectl  get nods 
   90  kubectl  get nodes 
   91  systemctl    enable    kubelet.service  
   92  systemctl   enable   docker
   93  # kubectl get cs
   94  kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
   95  kubectl  get  po  -o wide 
   96  kubectl  get  node 
   97  kubectl  get  nodes 
   98  kubectl get  po  -n kube-system
   99  ll
  100  mkdir   /k8s
  101  ll
  102  cd  /k8s/
  103  ll
  104  # kubectl get cs
  105  vim  nginx.yml
  106  kubectl   apply   -f  nginx.yml 
  107  kubectl   get  po -owide 
  108  kubectl   get  svc  -o wide 
  109  curl  -s  10.20.107.245:80 
  110  curl    10.20.107.245:80 
  111  curl    10.20.107.245:80
  112  curl    10.20.107.245 
  113  kubectl  get  pod   -o wide 
  114  kubectl  exec -it nginx-7bffc778db-7wmnf /bin/sh
  115  kubectl  delete  -f  nginx.yml 
  116  kubectl  get po -o wide 
  117  kubectl  get  svc  -o wide 
  118  vim  nginx.yml 
  119  ll
  120  cat  nginx.yml 
  121  vim   nginx.yml 
  122  kubectl   apply   -f  nginx.yml 
  123  kubectl   get  po -o wide 
  124  kubectl   get svc  -o wide 
  125  curl  10.20.51.160:80 
  126  curl  10.10.1.5:80
  127  kubectl  logs  --help 
  128  kubectl logs my-nginx
  129  kubectl   get  po -o wide 
  130  kubectl logs my-nginx-756fb87568-s29z2
  131  kubectl logs -f -c    my-nginx-756fb87568-s29z2
  132  kubectl logs -f     my-nginx-756fb87568-s29z2
  133  kubectl   get po -o wide 
  134  telnet  10.10.1.5 80
  135  iptables   --help
  136  iptables  -vnL
  137  vim  curl-utils.yml 
  138  kubectl  apply  -f  curl-utils.yml 
  139  vim  curl-utils.yml 
  140  kubectl  apply  -f  curl-utils.yml 
  141  kubectl get po  -o wide 
  142  kubectl  exec  -it curl-util  --  nslookup  my-nginx 
  143  kubectl   get  po  -o wide 
  144  kubectl get svc  -o wide 
  145  kubectl get  po  -n kube-system
  146  kubectl exec  curl-util -- cat /etc/resolv.conf 
  147  kubectl exec  curl-util -- nslookup  my-nginx
  148  kubectl logs  my-nginx-756fb87568-s29z2
  149  kubectl logs  curl-util
  150  kubectl logs  kube-proxy-zkn8v
  151  kubectl logs  etcd-k8s-master
  152  kubectl logs  etcd-k8s-master  -n kube-system 
  153  kubectl get  po  -n kube-system
  154  kubectl logs  kube-proxy-zkn8v  -n kube-system 
  155  kubectl  delete  -f  nginx.yml 
  156  kubectl get  po  -n kube-system
  157  kubectl get  po   
  158  kubectl  get po -o wide 
  159  ll
  160  vim  nginx.yml 
  161  ll
  162  kubectl  get  po  -o wide 
  163  vim  tt.yml
  164  kubectl   get nodes  -o wide 
  165  kubectl run nginx --image=nginx --port=80
  166  kubectl  get  po -o wide 
  167  curl  -s  10.10.1.7
  168  curl  -s  10.10.1.7:80 
  169  kubectl  logs  nginx-7c45b84548-pfx6d 
  170  kubectl   delete  -f deployment.apps/nginx
  171  kubectl  delte  --help 
  172  kubectl  delete  --help 
  173  kubectl  get  po -o wide 
  174  kubectl delete pod  nginx-7c45b84548-pfx6d  --now
  175  kubectl  get  po -o wide 
  176  kubectl  desc   nginx-7c45b84548-5hxmw   -o  yaml
  177  kubectl  describe   nginx-7c45b84548-5hxmw   -o  yaml
  178  kubectl  describe   nginx-7c45b84548-5hxmw   
  179  kubectl  describe   nginx  
  180  kubectl    get po  -o wide 
  181  kubectl   describe  po  nginx-7c45b84548-5hxmw
  182  kubectl    get po  -o wide 
  183  kubectl   delete  pods nginx-7c45b84548-5hxmw
  184  kubectl    get po  -o wide 
  185  find  / -name "deployment.apps"
  186  find  / -name "*deployment.apps*"
  187  find  / -name "*deployment*"
  188  find  / -name "*nginx*"
  189  kubectl   delete  -f  nginx.yml 
  190  kubectl  delete  deployment    deployment.apps/nginx
  191  kubectl  get   deployment/deployment.apps/nginx
  192  kubectl  get   deployment.apps/nginx
  193  kubectl  delete   deployment.apps/nginx
  194  kubectl  get   deployment.apps/nginx
  195  kubectl  get po -o wide 
  196  kubectl   get po  -o wide 
  197  kubectl   get nodes 
  198  ll
  199  history 
  200  kubectl  get  nodes 
  201  ll
  202  iptables -P FORWARD ACCEPT
  203  cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
EOF

  204  sysctl --system
  205  ll
  206  kubectl  delete  -f  curl-utils.yml 
  207  kubectl   get  po -o wide 
  208  vim  tt-nginx.yml
  209  kubectl  apply  -f  tt-nginx.yml 
  210  kubectl  get  po -o wide 
  211  kubectl  get  svc  -o wide 
  212  curl  10.10.1.10:80
  213  kubectl  apply  -f   curl-utils.yml 
  214  kubectl  get  po -o wide 
  215  kubectl  exec  curl-util  --  nslookup  nginx 
  216  kubectl  exec  curl-util  --   curl  -s  nginx:80 
  217  kubectl  exec  -it    curl-util   /bin/sh 
  218  ll
  219  history 
  220  kubectl   get  po  -o wide 
  221  kubectl  get svc  -o wide 
  222  curl  -s  10.20.101.222:80 
  223  kubectl   get  po  -o wide 
  224  curl  -s   10.10.1.10:80
  225  history





   node  节点
    1  ll
    2  ip a s
    3  apt-get  install  openssl-server
    4  apt-get  install  openss-server
    5  apt-get  install  openssh-server
    6  systemctl   start  sshd 
    7  ss  -tanl 
    8  getenforce
    9  apt  install  selinux-utils
   10  getenforce 
   11  vi  /etc/ssh/sshd_config 
   12  systemctl   restart  sshd
   13  ss  -tanl
   14  ll
   15  cat  /etc/ssh/sshd_config 
   16  ufw   stop 
   17  ufw    status 
   18  apt-get remove docker docker-engine docker.io
   19  sudo apt-get update
   20  swapoff   -a 
   21  swapoff   -a
   22  vi  /etc/fstab 
   23  mount -a
   24  swapoff   -a
   25  apt install  vim 
   26  vim  /etc/apt/sources.list
   27  apt-get -y install apt-transport-https ca-certifi  cates curl software-properties-common
   28  apt install curl
   29  curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg  | sudo apt-key add -
   30  vim  /etc/hosts
   31  hostnamectl  set-hostname k8s-node1
   32  vim  /etc/hosts
   33  reboot
   34  sudo apt-get update
   35  curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
   36  apt-key fingerprint 0EBFCD88
   37  apt-cache madison docker-ce
   38  apt-get install docker-ce
   39  cd  /etc/docker/
   40  ;;
   41  ll
   42  systemctl daemon-reload
   43  systemctl restart docker
   44  systemctl    status  docker
   45  systemctl    enable    docker
   46  curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg  | apt-key add -
   47  apt-get update
   48  vim  /etc/apt/sources.list.d/kubernetes.list
   49  apt-get update
   50  apt-get install kubeadm=1.15.0-00 kubelet=1.15.0-00 kubectl=1.15.0-00
   51  kubeadm config images list --kubernetes-version v1.15.0
   52  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.15.0
   53  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.15.0
   54  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.15.0
   55  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.15.0
   56  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
   57  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.3.10
   58  docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.3.1
   59  docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.15.0    k8s.gcr.io/kube-apiserver:v1.15.0
   60  docker  images 
   61  cd 
   62  kubeadm join 172.20.7.132:6443 --token yqj5ns.fd4tit6gg29q2l9i     --discovery-token-ca-cert-hash sha256:771b83763112ff988fd1c146aa94d3586c8883950c42813c2ef7839d3e4269d1
   63  systemctl   restart   docker 
   64  systemctl    status  kubelet.service
   65  kubectl  get  nodes 
   66  ss  -tanl 
   67  systemctl   restart  docker
   68  systemctl   restart   kubelet.service 
   69  systemctl    enable    kubelet.service
   70  systemctl   enable   docker
   71  ping  k8s-master
   72  kubectl   get  po 
   73  docker  ps -a 
   74  docker ps -a 
   75  docker  ps -a 
   76  docker  images 
   77  iptables -P FORWARD ACCEPT
   78  cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
EOF

   79  sysctl --system
   80  history

二 ,kubeadm 升级

主节点升级:

   升级流程:
   升级 k8s 集群必须 先升级 kubeadm 版本到目的 k8s 版本
   确定升级的版本
   apt-cache madison kubeadm 查看可用版本

查看当前的版本

 kubeadm version
 安装具体版本
 apt-get install kubeadm=1.13.6-00    #示例以1.13.6为目标版本;实际应选目标小版本中可用的最新补丁版本(如1.13.8、1.13.9)

 验证
  kubeadm version #验证版本

  升级计划:
  # kubeadm upgrade plan #查看升级计划

  升级:
  kubeadm upgrade apply v1.13.6


查看版本
  kubectl get nodes
  kubeadm    version 
  kubectl    version 

  升级kubelet和kubectl
 kubeadm upgrade node config --kubelet-version 1.13.6 
  
 apt-get install kubelet=1.13.6-00   kubectl=1.13.6-00 



node节点:
  apt-get install kubelet=1.13.6-00   kubeadm=1.13.6-00      kubectl=1.13.6-00 


##只能够进行小版本的升级,并且升级过程不能够跳级,比如从1.y到1.y+1,而不能够从1.y到1.y+2
###以下参考:
https://juejin.im/post/5c9ce517e51d452b837c959e

操作步骤
1,目前版本
kubectl get nodes
NAME            STATUS   ROLES    AGE    VERSION
192-168-10-14   Ready    master   36h    v1.13.0
192-168-10-18   Ready    <none>   103d   v1.13.0
192-168-10-21   Ready    master   104d   v1.13.0

 先在测试集群升级测试

查看服务配置
systemctl status kubelet  看服务的配置位置

loaded (/etc/systemd/system/kubelet.service

查看配置文件 
cat /etc/systemd/system/kubelet.service.d/10-kubeadm.conf

2,备份重要文件
/etc/kubernetes/bootstrap-kubelet.conf (可能并不存在,没有也没有关系)
/etc/kubernetes/kubelet.conf
/var/lib/kubelet/kubeadm-flags.env
/etc/sysconfig/kubelet

 
##升级主控点
查看是否有kubeadm 的高版本,

安装新版本 kubeadm  
yum install -y kubeadm-1.14.0-0 --disableexcludes=kubernetes

查看升级方案
kubeadm upgrade plan


升级kubeadm  到新版
kubeadm upgrade apply v1.14.0

看下网络部分是否需要升级
kubectl get pods -n kube-system
...
kubectl describe pod/kube-flannel-ds-amd64-5xxh7 -n kube-system
...
Image:         quay.io/coreos/flannel:v0.11.0-amd64


升级kubectl  kubelet 
yum install -y kubelet-1.14.0-0 kubectl-1.14.0-0 --disableexcludes=kubernetes

重启kubelet 
systemctl restart kubelet
systemctl daemon-reload

systemctl restart kubelet

检查升级结果:
kubectl get nodes


业务节点升级:

临时备份:
业务节点临时增加污点,防止升级期间调度

1,安装kubeadm  工具
2,升级到指定版本
kubeadm upgrade node config --kubelet-version v1.14.0
3,升级kubectl和kubelet
重启服务  kubelet 

4, 还原临时备份
先取消业务节点污点

然后还原master节点



这个网站不错
https://github.com/fanux/sealos

问题

1,网络问题导致节点notReady
打印node中日志 journalctl -f

2,kubeletNotReady runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized
处理:
原因:
因为kubelet配置了network-plugin=cni,但是还没安装,所以状态会是NotReady,不想看这个报错或者不需要网络,就可以修改kubelet配置文件,去掉network-plugin=cni 就可以了。

解决方法:

vi /etc/systemd/system/kubelet.service.d/10-kubeadm.conf

删除最后一行里的$KUBELET_NETWORK_ARGS
1.11.2版本的封装在/var/lib/kubelet/kubeadm-flags.env文件中

使用命令:
[root@k8s ~]# cat /var/lib/kubelet/kubeadm-flags.env
KUBELET_KUBEADM_ARGS=--cgroup-driver=systemd --cni-bin-dir=/opt/cni/bin --cni-conf-dir=/etc/cni/net.d --network-plugin=cni

重启kubelet:
systemctl enable kubelet && systemctl start kubelet

重新初始化:
kubeadm reset
kubeadm init --kubernetes-version=v1.11.2 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.11.90 --token-ttl 0

flannel插件的yaml

---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['psp.flannel.unprivileged']
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: beta.kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: beta.kubernetes.io/arch
                    operator: In
                    values:
                      - amd64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.11.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  # NOTE(review): beta.kubernetes.io/os|arch are deprecated
                  # (replaced by kubernetes.io/os|arch on newer clusters);
                  # kept as-is to match the cluster version this guide targets.
                  - key: beta.kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: beta.kubernetes.io/arch
                    operator: In
                    values:
                      - arm64
      # flannel must run in the node's network namespace to program it.
      hostNetwork: true
      tolerations:
      # Tolerate all NoSchedule taints so the DaemonSet also lands on masters.
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      # Copies the CNI config shipped in the ConfigMap into the host's
      # CNI config directory before the main container starts.
      - name: install-cni
        image: quay.io/coreos/flannel:v0.11.0-arm64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0-arm64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq          # masquerade traffic leaving the pod network
        - --kube-subnet-mgr  # read subnet config from the Kubernetes API
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]  # needed for the --ip-masq iptables rules
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  # NOTE(review): beta.kubernetes.io/os|arch are deprecated
                  # (replaced by kubernetes.io/os|arch on newer clusters);
                  # kept as-is to match the cluster version this guide targets.
                  - key: beta.kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: beta.kubernetes.io/arch
                    operator: In
                    values:
                      - arm
      # flannel must run in the node's network namespace to program it.
      hostNetwork: true
      tolerations:
      # Tolerate all NoSchedule taints so the DaemonSet also lands on masters.
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      # Copies the CNI config shipped in the ConfigMap into the host's
      # CNI config directory before the main container starts.
      - name: install-cni
        image: quay.io/coreos/flannel:v0.11.0-arm
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0-arm
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq          # masquerade traffic leaving the pod network
        - --kube-subnet-mgr  # read subnet config from the Kubernetes API
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]  # needed for the --ip-masq iptables rules
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-ppc64le
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  # NOTE(review): beta.kubernetes.io/os|arch are deprecated
                  # (replaced by kubernetes.io/os|arch on newer clusters);
                  # kept as-is to match the cluster version this guide targets.
                  - key: beta.kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: beta.kubernetes.io/arch
                    operator: In
                    values:
                      - ppc64le
      # flannel must run in the node's network namespace to program it.
      hostNetwork: true
      tolerations:
      # Tolerate all NoSchedule taints so the DaemonSet also lands on masters.
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      # Copies the CNI config shipped in the ConfigMap into the host's
      # CNI config directory before the main container starts.
      - name: install-cni
        image: quay.io/coreos/flannel:v0.11.0-ppc64le
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0-ppc64le
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq          # masquerade traffic leaving the pod network
        - --kube-subnet-mgr  # read subnet config from the Kubernetes API
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]  # needed for the --ip-masq iptables rules
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-s390x
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  # NOTE(review): beta.kubernetes.io/os|arch are deprecated
                  # (replaced by kubernetes.io/os|arch on newer clusters);
                  # kept as-is to match the cluster version this guide targets.
                  - key: beta.kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: beta.kubernetes.io/arch
                    operator: In
                    values:
                      - s390x
      # flannel must run in the node's network namespace to program it.
      hostNetwork: true
      tolerations:
      # Tolerate all NoSchedule taints so the DaemonSet also lands on masters.
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      # Copies the CNI config shipped in the ConfigMap into the host's
      # CNI config directory before the main container starts.
      - name: install-cni
        image: quay.io/coreos/flannel:v0.11.0-s390x
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0-s390x
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq          # masquerade traffic leaving the pod network
        - --kube-subnet-mgr  # read subnet config from the Kubernetes API
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]  # needed for the --ip-masq iptables rules
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg


END

原文地址 (original article): https://www.cnblogs.com/g2thend/p/11616534.html