kubernetes集群搭建

工作环境:

主机名 IP 系统
master 192.168.199.6 rhel7.4
node1 192.168.199.7 rhel7.4
node2 192.168.199.8 rhel7.4

[root@master ~]#yum install kubernetes etcd flannel -y

[root@node1 ~]#yum install kubernetes flannel -y

[root@node2 ~]#yum install kubernetes flannel -y

[root@master ~]# vim /etc/etcd/etcd.conf
  6 ETCD_LISTEN_CLIENT_URLS="http://localhost:2379,http://192.168.199.6:2379"
 21 ETCD_ADVERTISE_CLIENT_URLS="http://localhost:2379,http://192.168.199.6:2379"
[root@master ~]# systemctl start etcd

[root@master ~]# netstat -atup|grep 2379
tcp        0      0 localhost:2379          0.0.0.0:*               LISTEN      5442/etcd           
tcp        0      0 master:2379             0.0.0.0:*               LISTEN      5442/etcd           
tcp        0      0 master:2379             master:51468            ESTABLISHED 5442/etcd           
tcp        0      0 localhost:2379          localhost:38090         ESTABLISHED 5442/etcd           
tcp        0      0 master:51468            master:2379             ESTABLISHED 5442/etcd           
tcp        0      0 master:51456            master:2379             TIME_WAIT   -                   
tcp        0      0 localhost:38090         localhost:2379          ESTABLISHED 5442/etcd 
[root@master ~]# systemctl enable etcd

检查etcd cluster状态
[root@master ~]# etcdctl cluster-health
member 8e9e05c52164694d is healthy: got healthy result from http://192.168.199.6:2379
cluster is healthy

检查etcd集群成员列表

[root@master ~]# etcdctl member list
8e9e05c52164694d: name=default peerURLs=http://localhost:2380 clientURLs=http://192.168.199.6:2379,http://localhost:2379 isLeader=true

[root@master ~]# vim /etc/kubernetes/config
 22 KUBE_MASTER="--master=http://192.168.199.6:8080"
[root@master ~]# vim /etc/kubernetes/apiserver
  8 KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
 17 KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.199.6:2379"
 23 KUBE_ADMISSION_CONTROL="--admission-control=AlwaysAdmit"
[root@master ~]# vim /etc/kubernetes/controller-manager    # 该文件不用修改,保持默认即可
[root@master ~]# vim /etc/kubernetes/scheduler
  7 KUBE_SCHEDULER_ARGS="--address=0.0.0.0"

[root@master ~]# vim /etc/sysconfig/flanneld
  4 FLANNEL_ETCD_ENDPOINTS="http://192.168.199.6:2379"
  8 FLANNEL_ETCD_PREFIX="/k8s/network"
 11 FLANNEL_OPTIONS="--iface=ens32"

[root@master ~]# systemctl restart flanneld
   [root@master ~]# etcdctl set /k8s/network/config '{"network":"10.255.0.0/16"}'
   {"network":"10.255.0.0/16"}
   [root@master ~]# etcdctl get /k8s/network/config
   {"network":"10.255.0.0/16"}
[root@master ~]# systemctl restart kube-apiserver kube-controller-manager kube-scheduler.service
[root@master ~]# systemctl status kube-apiserver kube-controller-manager kube-scheduler.service
[root@master ~]# systemctl enable kube-apiserver kube-controller-manager kube-scheduler
[root@master ~]# systemctl enable flanneld

node1

[root@node1 ~]# vim /etc/sysconfig/flanneld
  4 FLANNEL_ETCD_ENDPOINTS="http://192.168.199.6:2379"
  8 FLANNEL_ETCD_PREFIX="/k8s/network"
 11 FLANNEL_OPTIONS="--iface=ens33"
[root@node1 ~]# ps -aux|grep flanneld
root      10467  0.2  1.7 375996 31932 ?        Ssl  20:33   0:00 /usr/bin/flanneld -etcd-endpoints=http://192.168.199.6:2379 -etcd-prefix=/k8s/network --iface=ens33
root      10615  0.0  0.0 112676   984 pts/1    S+   20:35   0:00 grep --color=auto flanneld
[root@node1 ~]#
[root@node1 ~]# vim /etc/kubernetes/config
 22 KUBE_MASTER="--master=http://192.168.199.6:8080"

 [root@node1 ~]# vim /etc/kubernetes/kubelet
   5 KUBELET_ADDRESS="--address=0.0.0.0"
 11 KUBELET_HOSTNAME="--hostname-override=node1"
 14 KUBELET_API_SERVER="--api-servers=http://192.168.199.6:8080"
[root@node1 ~]# systemctl restart flanneld kube-proxy.service kubelet docker
[root@node1 ~]# systemctl enable flanneld kube-proxy.service kubelet docker
[root@node1 ~]# systemctl status flanneld kube-proxy.service kubelet docker

[root@node1 ~]# ifconfig

flannel0: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST>  mtu 1472
        inet 10.255.4.0  netmask 255.255.0.0  destination 10.255.4.0
        inet6 fe80::d5dd:8c9b:48b6:5cb5  prefixlen 64  scopeid 0x20<link>
        unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00  txqueuelen 500  (UNSPEC)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 3  bytes 144 (144.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

node2:  

node2的配置和node1的一样,所以只需要把node1的配置文件复制过去即可。
[root@node1 ~]#scp /etc/sysconfig/flanneld 192.168.199.8:/etc/sysconfig/
[root@node2 ~]# vim /etc/sysconfig/flanneld
# Flanneld configuration options  
# etcd url location.  Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://192.168.199.6:2379"
# etcd config key.  This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/k8s/network"
# Any additional options that you want to pass
FLANNEL_OPTIONS="--iface=ens33"    # 指定本机网卡,若网卡名不是ens33,请改为实际网卡名
[root@node1 ~]#scp /etc/kubernetes/config 192.168.199.8:/etc/kubernetes/
[root@node1 ~]#scp /etc/kubernetes/proxy 192.168.199.8:/etc/kubernetes/
[root@node1 ~]#scp /etc/kubernetes/kubelet  192.168.199.8:/etc/kubernetes/
[root@node2 ~]# vim /etc/kubernetes/kubelet
 11 KUBELET_HOSTNAME="--hostname-override=node2"   # 修改为自己的主机名
[root@node2 ~]#systemctl restart flanneld kube-proxy kubelet docker     
[root@node2 ~]#systemctl enable flanneld kube-proxy kubelet docker
[root@node2 ~]#systemctl status flanneld kube-proxy kubelet docker
完成之后在master上查看kubernetes集群的状态,这样集群就搭建完成了。

在master上查看集群状态:

[root@master log]# kubectl get nodes
NAME      STATUS    AGE
node1     Ready     7m
node2     Ready     1m

原文地址:https://www.cnblogs.com/winter1519/p/9946012.html