SaltStack in Practice

1. Production environment recommendations

1. It is not recommended to use Salt's file module for recursive directory management to deploy code.
      Instead, orchestrate the deployment as states: ship a tarball with file.managed and unpack it with cmd.run.

2. Configuration management: it is not recommended to manage application (project) config files with Salt.
       Manage in layers: Salt only manages the services themselves, e.g. nginx, tomcat, apache.

3. If you have a dedicated file server, source can point at salt://, http:// or ftp://.

4. Version your SLS files (a git workflow sketch follows)
  1. Create a git repository.
  2. In a test environment, write the SLS, test it, then git commit && git push to the repository.
  3. In production, git pull, test, then apply everywhere.
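
A minimal sketch of that workflow, assuming the states live in /srv/salt and a remote named origin exists (the repository URL is hypothetical, not from the original notes):

# On the test master: track and publish the states
cd /srv/salt
git init && git add . && git commit -m 'initial SLS import'
git remote add origin git@git.example.com:ops/salt-states.git   # hypothetical repo URL
git push -u origin master

# On the production master: pull, dry-run, then apply
cd /srv/salt && git pull
salt 'node1' state.sls web.tomcat test=True
salt '*' state.highstate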

5. Use the Master Job Cache to save job output to MySQL;
    cache files live under /var/cache/salt and can also be shipped into a database with something like Filebeat.

2. Returners and the Master Job Cache

# Returner modules
https://www.unixhot.com/docs/saltstack/ref/returners/index.html

# Job cache
# Store all Salt job results in a database
https://www.unixhot.com/docs/saltstack/ref/returners/all/salt.returners.mysql.html#module-salt.returners.mysql

Add the following to /etc/salt/master:
master_job_cache: 'mysql'
mysql.host: 'salt'
mysql.user: 'salt'
mysql.pass: 'salt'
mysql.db: 'salt'
mysql.port: 3306
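
The MySQL returner needs a MySQL driver importable by the Python that Salt runs on, and the master must be restarted to pick up the new job cache. A sketch; the driver package depends on how Salt was installed (assumption: a Python 3 based Salt using the system python3):

pip3 install pymysql            # or the distribution's PyMySQL package
systemctl restart salt-master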

Create the database and tables, and grant privileges:
CREATE DATABASE  `salt`
  DEFAULT CHARACTER SET utf8
  DEFAULT COLLATE utf8_general_ci;

USE `salt`;

--
-- Table structure for table `jids`
--

DROP TABLE IF EXISTS `jids`;
CREATE TABLE `jids` (
  `jid` varchar(255) NOT NULL,
  `load` mediumtext NOT NULL,
  UNIQUE KEY `jid` (`jid`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
CREATE INDEX jid ON jids(jid) USING BTREE;

--
-- Table structure for table `salt_returns`
--

DROP TABLE IF EXISTS `salt_returns`;
CREATE TABLE `salt_returns` (
  `fun` varchar(50) NOT NULL,
  `jid` varchar(255) NOT NULL,
  `return` mediumtext NOT NULL,
  `id` varchar(255) NOT NULL,
  `success` varchar(10) NOT NULL,
  `full_ret` mediumtext NOT NULL,
  `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
  KEY `id` (`id`),
  KEY `jid` (`jid`),
  KEY `fun` (`fun`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

--
-- Table structure for table `salt_events`
--

DROP TABLE IF EXISTS `salt_events`;
CREATE TABLE `salt_events` (
`id` BIGINT NOT NULL AUTO_INCREMENT,
`tag` varchar(255) NOT NULL,
`data` mediumtext NOT NULL,
`alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
`master_id` varchar(255) NOT NULL,
PRIMARY KEY (`id`),
KEY `tag` (`tag`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
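
The schema above follows the Salt MySQL returner docs. A GRANT statement is not shown in the original notes; a sketch matching the user/password assumed in the master config above (MySQL 5.x syntax; on MySQL 8.0 run CREATE USER first):

GRANT ALL PRIVILEGES ON salt.* TO 'salt'@'%' IDENTIFIED BY 'salt';
FLUSH PRIVILEGES;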

# The salt_returns table starts out empty; run a salt command and query it again, and you will see the command you ran together with its full results.

Building your own tooling on top of SaltStack:
  1. The Master Job Cache saves every job's output to MySQL.
  2. For a management platform, associate your own user IDs with the Salt jid (see the query sketch below).
  3. Use list targeting (-L) for target selection.
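
For point 2, once jobs are in MySQL a management platform can look a job back up by its jid; a sketch against the salt_returns table above (the jid value is just an example):

SELECT id, fun, success, `return`
FROM salt_returns
WHERE jid = '20211107123456789012';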

3. SaltStack remote execution: choosing targets

salt '<target>' <function> [arguments]

# For example:
salt '*' test.ping
salt '*.example.org' test.ping
salt -G 'os:Ubuntu' test.ping
salt 'web-[x-z]' test.ping
salt -L 'foo,bar,baz,quo' test.ping
# Regular expressions (commonly used)
salt -E 'web1-(prod|devel)' test.ping
# The same match in a top file
base:
  'web1-(prod|devel)':
    - match: pcre  # marks this target as a regular expression
    - webserver

# Match by IP address or subnet (common, e.g. in game-server ops)
salt -S 192.168.0.111 test.ping
salt -S 192.168.0.0/24 test.ping
# Compound matching, not recommended
https://www.unixhot.com/docs/saltstack/topics/targeting/compound.html
# nodegroups: groups of minions predefined in the master config (see the sketch below)
# Batch execution (commonly used)
salt '*' -b 10 test.ping # run 10 minions at a time, then move on to the next batch
salt -G 'os:RedHat' --batch-size 25% apache.signal restart # with many machines, use a percentage: 25% of the targeted minions run at a time until all have been processed
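
A sketch of a nodegroup definition, since the notes only mention the feature; the group name and members are assumptions:

# /etc/salt/master
nodegroups:
  web: 'L@web1,web2 or E@web[0-9]+'

# target the group with -N
salt -N web test.ping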

4. SaltStack remote execution: execution modules

https://www.unixhot.com/docs/saltstack/ref/modules/all/salt.modules.network.html#module-salt.modules.network

# 1. Active TCP connections
salt '*' network.active_tcp
# 2. Connectivity test
salt '*' network.connect archlinux.org 80
salt '*' network.connect archlinux.org 80 timeout=3
salt '*' network.connect archlinux.org 80 timeout=3 family=ipv4
salt '*' network.connect google-public-dns-a.google.com port=53 proto=udp timeout=3

# List all services
salt '*' service.get_all

5. SaltStack day-to-day management tips: include

/srv/salt/base/web

When we installed LAMP earlier we put every install into a single state file; with include we can split the states apart and pull them in only where they are needed.

# jdk.sls
jdk-install:
  pkg.installed:
    - pkgs:
      - ca-certificates
      - java-1.8.0-openjdk

# tomcat.sls
include:
  - web.jdk 

tomcat-install:
  file.managed:
    - name: /usr/local/src/apache-tomcat-8.5.72.tar.gz
    - source: salt://web/files/apache-tomcat-8.5.72.tar.gz
    - user: root
    - group: root
    - mode: 755
  cmd.run:
    - name: cd /usr/local/src && tar zxf apache-tomcat-8.5.72.tar.gz && mv apache-tomcat-8.5.72 /usr/local/ && ln -s /usr/local/apache-tomcat-8.5.72 /usr/local/tomcat
    - unless: test -L /usr/local/tomcat && test -d /usr/local/apache-tomcat-8.5.72

Run the states:

salt 'node1' state.sls web.tomcat  # runs just as it did before the split
salt 'node1' state.sls web.tomcat test=True # dry run

Salt status management: salt-run

[root@Master web]# salt-run manage.status
down:
up:
    - Master
    - node1
    - node3

# Check versions

[root@Master web]# salt-run manage.versions
Master:
    3004
Up to date:
    ----------
    Master:
        3004
    node1:
        3004
    node3:
        3004

Changing a minion_id (a shell sketch follows the steps)

1. Stop the minion service
2. salt-key -d minion_id  # delete the old key on the master
3. rm -f /etc/salt/minion_id
4. rm -rf /etc/salt/pki
5. Change id in the minion config file
6. Start the minion
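
A minimal sketch of the sequence, assuming systemd service names and that id is set in /etc/salt/minion; old-id/new-id are placeholders:

systemctl stop salt-minion                            # on the minion
salt-key -d old-id -y                                 # on the master: drop the old key
rm -f /etc/salt/minion_id                             # on the minion
rm -rf /etc/salt/pki                                  # force the minion keys to be regenerated
sed -i 's/^#\?id:.*/id: new-id/' /etc/salt/minion     # set the new id
systemctl start salt-minion                           # the master must accept the new key again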

6. Masterless architecture: local, single-machine management

# node3
vim /etc/salt/minion
file_client: local

# Also uncomment/define file_roots in the minion config (see the sketch below)
# With salt-minion stopped, the machine can still be managed with Salt locally

# Only the way commands are invoked changes; everything else stays the same
salt-call --local state.sls web.tomcat
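
A sketch of the full local-mode minion settings, assuming the same /srv/salt layout as on the master (the prod entry is an assumption):

# /etc/salt/minion
file_client: local
file_roots:
  base:
    - /srv/salt/base
  prod:
    - /srv/salt/prod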

7. SaltStack case study: zabbix-agent installation

# Directory layout
base holds the states shared by every machine plus the system initialization states
 cd /srv/salt/base/
mkdir init/files zabbix/files logstash -p
cd init/files
wget http://mirrors.aliyun.com/repo/epel-7.repo

# EPEL repo state, written under the init directory
vim yum-repo.sls
/etc/yum.repos.d/epel-7.repo:
  file.managed:
    - source: salt://init/files/epel-7.repo
    - user: root
    - group: root
    - mode: 644

# In the zabbix directory
vim zabbix-agent.sls
include:
  - init.yum-repo

zabbix-agent:
  pkg.installed:
    - name: zabbix50-agent
  file.managed:
    - name: /etc/zabbix_agentd.conf
    - source: salt://zabbix/files/zabbix_agentd.conf
    - user: root
    - group: root
    - mode: 644
    - template: jinja
    - defaults:
        ZABBIX_SERVER: 192.168.0.110
        AGENT_HOSTNAME: {{ grains['fqdn'] }}
    - require:
      - pkg: zabbix-agent
  service.running:
    - name: zabbix-agent
    - enable: True
    - watch:
      - file: zabbix-agent
      - pkg: zabbix-agent

zabbix_agentd.conf.d:
  file.directory:
    - name: /etc/zabbix_agentd.conf.d
    - watch_in:
      - service: zabbix-agent
    - require:
      - pkg: zabbix-agent
      - file: /etc/zabbix_agentd.conf

vim zabbix/files/zabbix_agentd.conf
# Note: the Jinja variable names cannot contain '-', so underscores are used
Server={{ ZABBIX_SERVER }}
# Hostname is filled in from the minion's own fqdn grain
Hostname={{ AGENT_HOSTNAME }}
Include=/etc/zabbix_agentd.conf.d/

# Dry run first
salt 'node3' state.sls init.yum-repo test=True
salt 'node3' state.sls zabbix.zabbix-agent test=True
# If the dry run looks good, apply it
salt 'node3' state.sls zabbix.zabbix-agent

8. System initialization (a minimal SLS sketch follows the list):

1. DNS  file.managed
2. Firewall  service.dead
3. SELinux  file.managed
4. limits tuning  file.managed
5. SSH UseDNS setting, change the port  file.managed
6. sysctl kernel tuning  sysctl
7. Disable unneeded services  service
8. Time sync  file.managed cron
9. yum repos  file.managed
10. Base software packages  pkg.installed
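
A minimal sketch covering two of the items above, assuming firewalld on CentOS 7; the state file names (init/dns.sls, init/firewall.sls) and the resolv.conf template are assumptions:

# init/dns.sls
/etc/resolv.conf:
  file.managed:
    - source: salt://init/files/resolv.conf
    - user: root
    - group: root
    - mode: 644

# init/firewall.sls
firewalld:
  service.dead:
    - enable: False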

9. SaltStack case study: Redis deployment

# Install redis with yum, then adjust redis.conf
bind 0.0.0.0  or {{ BIND_IP }}
daemonize yes
maxmemory {{ REDIS_MEM }}

# Config files
# Create the directory layout
mkdir -p prod/modules/{apache,haproxy,keepalived,mysql,redis}
# modules/ holds only the install states
vim prod/modules/redis/redis-install.sls
redis-install:
  pkg.installed:
    - name: redis

# Config file and service startup
mkdir -p prod/redis-cluster/files
vim prod/redis-cluster/redis-master.sls
include:
  - modules.redis.redis-install

redis-master-config:
  file.managed:
    - name: /etc/redis.conf
    - source: salt://redis-cluster/files/redis-master.conf
    - user: root
    - group: root
    - mode: 644
    - template: jinja
    - defaults:
        REDIS_MEM: 500M
        BIND_IP: {{ grains['fqdn'] }}

redis-master-service:
  service.running:
    - name: redis
    - enable: True
    - watch:
      - file: redis-master-config

# Test; the default saltenv is base, so pass saltenv=prod (see the file_roots sketch below)
salt 'node3' state.sls redis-cluster.redis-master test=True saltenv=prod
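
For saltenv=prod to resolve, the master's file_roots must define the prod environment; a sketch assuming the directories created above live under /srv/salt:

# /etc/salt/master
file_roots:
  base:
    - /srv/salt/base
  prod:
    - /srv/salt/prod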

10. SaltStack project case: directory design and planning

In production, organize states by project:
modules/  base install states
  - files

redis-cluster/  shared services
  - files

sms/
  - redis-cluster/redis-master.sls redis-slave.sls
  - mysql-cluster/mysql-master.sls mysql-slave.sls
  - nginx

shop/

user/

11. Salt SSH introduction

# How to manage servers without installing salt-minion on them
yum -y install salt-ssh

# Add one entry per server to be managed
vi /etc/salt/roster
node2:  # target name
  host: 192.168.0.112  # server IP, account, password and port
  user: root
  passwd: 1111111
  port: 22

node4:  # target name
  host: 192.168.0.137
  user: root
  passwd: 1111111
  port: 22

# Run commands; -r executes a raw shell command
[root@Master salt]# salt-ssh '*' test.ping
node2:
    True
[root@Master salt]# salt-ssh '*' -r 'w'
node2:
    ----------
    retcode:
        0
    stderr:
    stdout:
        root@192.168.0.112's password: 
         23:41:02 up 1 day, 10:57,  2 users,  load average: 0.25, 0.11, 0.08
        USER     TTY      FROM             LOGIN@   IDLE   JCPU   PCPU WHAT
        root     tty1                      233月21 22days  0.16s  0.16s -bash
        root     pts/0    192.168.0.125    13:43    3:58   0.15s  0.15s -bash
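
The password prompt visible in the stdout above shows that no SSH key had been deployed yet; salt-ssh can push its own key so later runs go passwordless (a sketch, the '*' target is just an example):

salt-ssh '*' --key-deploy test.ping   # deploys salt-ssh's authorized key to the targets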

12. Salt API introduction

# salt-api is accessed over an HTTPS connection
https://www.unixhot.com/docs/saltstack/ref/netapi/all/salt.netapi.rest_cherrypy.html#a-rest-api-for-salt

# Install salt-api
yum install salt-api -y
yum list | grep PyOpenSSL
yum install pyOpenSSL -y

# Generate a self-signed certificate
salt-call --local tls.create_self_signed_cert  # if this fails, use the alternative method shown further down

# Have the master include drop-in config files
vi /etc/salt/master
default_include: master.d/*.conf

# Configure rest_cherrypy and the SSL certificate
vim master.d/api.conf
rest_cherrypy:
  host: 192.168.0.110
  port: 8000
  ssl_crt: /etc/pki/tls/certs/localhost.crt
  ssl_key: /etc/pki/tls/certs/localhost.key

# Configure external authentication (PAM)
vim master.d/auth.conf
external_auth:
  pam:
    saltapi:    # what this user is allowed to run
      - .*
      - '@wheel'
      - '@runner'
      - '@jobs'
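
PAM authentication needs a matching system user on the master; the original notes do not show creating it, so this is an assumed step (username and password saltapi, as used in the curl examples below):

useradd -M -s /sbin/nologin saltapi
echo 'saltapi' | passwd --stdin saltapi   # CentOS-style passwd; use a real password in practice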

systemctl restart salt-master
systemctl start salt-api

Verification:

# Verify login and obtain a token
curl -sSk https://192.168.0.110:8000/login \
    -H 'Accept: application/x-yaml' \
    -d username='saltapi' \
    -d password='saltapi' \
    -d eauth=pam

# Run a command; just substitute the token returned by /login
curl -sSk https://192.168.0.110:8000 \
    -H 'Accept: application/x-yaml' \
    -H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\
    -d client=local \
    -d tgt='*' \
    -d fun=test.ping

# Pass function arguments with an extra -d
curl -sSk https://192.168.0.110:8000 \
    -H 'Accept: application/x-yaml' \
    -H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\
    -d client=local \
    -d tgt='*' \
    -d fun=cmd.run -d arg='uptime'

# Query a single minion; returns all of its grains
curl -sSk https://192.168.0.110:8000/minions/node1 \
    -H 'Accept: application/x-yaml' \
    -H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'

Error: 'tls' __virtual__ returned False: PyOpenSSL version 0.10 or later must be installed before this module can be used.

Possible cause: a Python version mismatch (PyOpenSSL installed for a different Python than the one Salt runs on).

An alternative way to generate the SSL certificate:
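
The steps below assume /etc/pki/tls/private/localhost.key already exists; if it does not, a key can be generated first (a sketch, not part of the original notes):

cd /etc/pki/tls/private
openssl genrsa -des3 -out localhost.key 2048   # prompts twice for the same passphrase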

# Step 1
[root@Master private]# openssl rsa -in localhost.key -out localhost_nopass.key
Enter pass phrase for localhost.key:  # enter the key's passphrase
writing RSA key

# Step 2
[root@Master certs]# umask 77;\
> /usr/bin/openssl req -utf8 -new -key /etc/pki/tls/private/localhost.key -x509 -days 365 -out /etc/pki/tls/certs/localhost.crt -set_serial 0
Enter pass phrase for /etc/pki/tls/private/localhost.key:
You are about to be asked to enter information that will be incorporated
into your certificate request.
What you are about to enter is what is called a Distinguished Name or a DN.
There are quite a few fields but you can leave some blank
For some fields there will be a default value,
If you enter '.', the field will be left blank.
-----
# Fill in as prompted
Country Name (2 letter code) [XX]:CN  # country
State or Province Name (full name) []:Shanghai  # province
Locality Name (eg, city) [Default City]:Shanghai     
Organization Name (eg, company) [Default Company Ltd]:
Organizational Unit Name (eg, section) []:
Common Name (eg, your name or your server's hostname) []:xc
Email Address []:          

# Step 3
[root@Master private]# openssl rsa -in localhost.key -out localhost_nopass.key
Enter pass phrase for localhost.key:
writing RSA key
[root@Master private]# ls
localhost.key  localhost_nopass.key

13. Salt master high availability

# minion configuration
vim /etc/salt/minion
master:
  - 192.168.0.110
  - 192.168.0.136

# Configuration for the master pair
/etc/salt/master  keep the config files identical
/etc/salt/pki/master  keep the master keypair identical; an NFS mount is one way to share it
/srv/salt # keep the masters' file_roots content identical
/etc/salt/minion # list both masters and restart the minion
# Install NFS
yum install nfs-utils rpcbind

# Configure the exports
vim /etc/exports
/etc/salt/pki/master 192.168.0.136 *(rw,sync,no_root_squash,no_all_squash)
/srv/salt 192.168.0.136 *(rw,sync,no_root_squash,no_all_squash)
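
After editing /etc/exports the NFS services must be running and the export table reloaded; a sketch assuming CentOS 7 service names:

systemctl enable --now rpcbind nfs-server
exportfs -r    # reload /etc/exports
exportfs -v    # verify what is exported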
# Run on node3 (the second master)
[root@node3 salt]# showmount -e 192.168.0.110
Export list for 192.168.0.110:
/srv/salt            (everyone)
/etc/salt/pki/master (everyone)
[root@node3 salt]# showmount -a 192.168.0.110
All mount points on 192.168.0.110:

# Mount the directories on node3
[root@node3 pki]# mount -t nfs 192.168.0.110:/etc/salt/pki/master /etc/salt/pki/master  # permissions may need adjusting
[root@node3 pki]# mount -t nfs 192.168.0.110:/srv/salt /srv/salt
[root@node3 pki]# mount   # verify the mounts

Test on the second master:

[root@node3 salt]# salt-key -A
The following keys are going to be accepted:
Unaccepted Keys:
Master
node1
node3
Proceed? [n/Y] y
Key for minion Master accepted.
Key for minion node1 accepted.
Key for minion node3 accepted.

[root@node3 master]# salt '*' test.ping
node1:
    True
node3:
    True
Master:
    True

Homework:

1. System initialization

2. Build a cluster

VIP
haproxy + keepalived (active/standby)
web cluster (2 × apache + php)
redis cluster (master/slave), mysql cluster (master/slave)
Original article: https://www.cnblogs.com/yangmeichong/p/15511344.html