openstack-往已有集群中添加控制节点,实现控制节点的高可用

新添加的controller节点基础环境准备

1、yum install centos-release-openstack-train.noarch -y  #安装T版yum源

2、yum install python-openstackclient openstack-selinux -y  #安装openstack客户端命令和selinux

3、yum install python2-PyMySQL -y   #安装py程序连接mysql所需要的模块

4、yum install python-memcached -y  #安装py程序连接memcache所需要的模块

5、scp 172.31.7.101:/root/{admin-openrc.sh,demo-openrc.sh} /root/
#将controller1上admin和myuser用户的环境变量拷贝到当前controller2节点上

controller-安装keystone

1、yum install openstack-keystone httpd mod_wsgi -y  #安装keystone服务

##到之前已经部署好的controller节点上,把已经部署好的keystone的配置文件目录进行打包,拷贝到当前controller节点上
(1)、cd /etc/keystone/
(2)、tar czvf keystone-controller1.tar.gz ./*
(3)、scp keystone-controller1.tar.gz 172.31.7.102:/etc/keystone/

2、cd /etc/keystone/

3、tar xvf keystone-controller1.tar.gz

4、vim /etc/httpd/conf/httpd.conf
ServerName 172.31.7.102:80    #让servername监听本机地址(主站点)

5、vim /etc/hosts
172.31.7.248 openstack-vip.linux.local

6、ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

7、systemctl start httpd.service

8、systemctl enable httpd.service


##到controller2节点进行测试,关闭haproxy上controller1节点的5000端口,通过controller2节点5000端口进行测试
1、. admin-openrc.sh 

2、neutron agent-list   #能通过keystone认证后,从mysql中获取到数据即配置正确

controller-安装glance

1、yum install openstack-glance -y

##到之前已经部署好的controller节点上,把已经部署好的glance的配置文件目录进行打包,拷贝到当前controller节点上
(1)、cd /etc/glance/
(2)、tar czvf glance-controller1.tar.gz ./*
(3)、scp glance-controller1.tar.gz 172.31.7.102:/etc/glance/

2、cd /etc/glance/

3、tar xvf glance-controller1.tar.gz

4、systemctl start openstack-glance-api.service

5、systemctl enable openstack-glance-api.service

6、vim /etc/fstab     #将NFS存放镜像的目录挂载到当前controller2节点存放镜像的目录下
172.31.7.105:/data/glance/ /var/lib/glance/images/ nfs defaults,_netdev 0 0

7、mount -a 
#注意 /var/lib/glance/images/ 目录权限


##到controller2节点进行测试,关闭haproxy上controller1节点的9292端口,通过controller2节点9292端口进行测试
1、openstack image list   #通过keystone认证后,是否能够获取到镜像

controller-安装placement

1、yum install openstack-placement-api -y

##到之前已经部署好的controller节点上,把已经部署好的placement的配置文件目录进行打包,拷贝到当前controller节点上
(1)、cd /etc/placement/
(2)、tar czvf placement-controller1.tar.gz ./*
(3)、scp placement-controller1.tar.gz 172.31.7.102:/etc/placement/

2、cd /etc/placement/

3、tar xvf placement-controller1.tar.gz

4、vim /etc/httpd/conf.d/00-placement-api.conf   #下面内容添加到配置文件的最后
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>

5、systemctl restart httpd


##到controller2节点进行测试,关闭haproxy上controller1节点的8778端口,通过controller2节点8778端口进行测试
1、placement-status upgrade check  #查看状态是否是success

controller-安装nova

1、yum install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler -y
  
##到之前已经部署好的controller节点上,把已经部署好的nova的配置文件目录进行打包,拷贝到当前controller节点上
(1)、cd /etc/nova/
(2)、tar czvf nova-controller1.tar.gz ./*
(3)、scp nova-controller1.tar.gz 172.31.7.102:/etc/nova/

2、cd /etc/nova/

3、tar xvf nova-controller1.tar.gz

4、grep "172" ./* -R   #查看有哪些配置需要修改
./nova.conf:server_listen = 172.31.7.101
./nova.conf:server_proxyclient_address = 172.31.7.101

5、vim nova.conf
[vnc]
server_listen = 172.31.7.102   #指定vnc服务端监听地址都为controller2本机地址
server_proxyclient_address = 172.31.7.102

6、systemctl start \
    openstack-nova-api.service \
    openstack-nova-scheduler.service \
    openstack-nova-conductor.service \
    openstack-nova-novncproxy.service

7、systemctl enable \
    openstack-nova-api.service \
    openstack-nova-scheduler.service \
    openstack-nova-conductor.service \
    openstack-nova-novncproxy.service
    
8、tail -f /var/log/nova/*.log  #日志中不能有任何报错


##到controller2节点进行测试,关闭haproxy上controller1节点的8774和6080端口,通过controller2节点8774和6080端口进行测试
1、nova service-list  #列出nova的所有服务,并且状态必须是UP

controller-安装neutron

1、yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y

##到之前已经部署好的controller节点上,把已经部署好的neutron的配置文件目录进行打包,拷贝到当前controller节点上
(1)、cd /etc/neutron/
(2)、tar czvf neutron-controller1.tar.gz ./*
(3)、scp neutron-controller1.tar.gz 172.31.7.102:/etc/neutron/

2、cd /etc/neutron/

3、tar xvf neutron-controller1.tar.gz

4、vim /etc/sysctl.conf    #添加内核参数
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1

5、vim /usr/lib/python2.7/site-packages/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py
     metric = 100               #第400行
     #if 'metric' in gateway:   #注释掉这两行,否则brq网桥设备无法自动绑定eth0网卡
     #    metric = gateway['metric'] - 1
     
6、systemctl start neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service

7、sysctl -p

8、systemctl enable neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service

9、tail -f /var/log/neutron/*.log   #日志中不能有任何报错


##到controller2节点进行测试,关闭haproxy上controller1节点的9696端口,通过controller2节点9696端口进行测试
1、neutron agent-list  #列出neutron的所有服务,并且状态必须是true

controller-安装dashboard

1、yum install openstack-dashboard -y

##到之前已经部署好的controller节点上,把已经部署好的dashboard的配置文件目录进行打包,拷贝到当前controller节点上
(1)、cd /etc/openstack-dashboard/
(2)、tar zcvf openstack-dashboard-controller1.tar.gz ./*
(3)、scp openstack-dashboard-controller1.tar.gz 172.31.7.102:/etc/openstack-dashboard/

2、cd /etc/openstack-dashboard/

3、tar xvf openstack-dashboard-controller1.tar.gz

4、grep "172" ./* -R
./local_settings:ALLOWED_HOSTS = ['172.31.7.101', 'openstack-vip.linux.local']
./local_settings:OPENSTACK_HOST = "172.31.7.101"

5、vim local_settings
ALLOWED_HOSTS = ['172.31.7.102', 'openstack-vip.linux.local']
OPENSTACK_HOST = "172.31.7.102"

6、systemctl restart httpd

7、tail -f /var/log/httpd/*.log   #日志中不能有任何报错


##到controller2节点进行测试,关闭haproxy上controller1节点的80端口,通过controller2节点80端口进行测试
1、http://172.31.7.102/dashboard   #浏览器访问,账号密码都可以用admin或myuser,
原文地址:https://www.cnblogs.com/dongzhanyi123/p/13300961.html