Elasticsearch 5.5 installation

 

beat --> kafka --> logstash --> elasticsearch --> kibana

 

Filebeat configuration file

[root@varnish1 filebeat-5.5.0-linux-x86_64]# grep -v "#" filebeat.yml  | sed '/^$/d'
filebeat.prospectors:
- input_type: log
  paths:
    - /data/*.log
tags: ["haproxy-log"]
output.kafka:
  enabled: true
  hosts: ["kafka1:9092","kafka2:9092","kafka3:9092"]
  topic: logostash-%{[type]}
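To start shipping logs, run Filebeat from the directory shown in the prompt above (-e logs to stderr, -c names the config file; backgrounding it with nohup is one common choice, not the only one):

nohup ./filebeat -e -c filebeat.yml &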

Logstash configuration file

[root@logstashserver etc]# cat logstash.conf
input {
        kafka {
                bootstrap_servers => "kafka1:9092,kafka2:9092,kafka3:9092"
                topics => ["logostash-log"]
                consumer_threads => 5
                decorate_events => true
        }
}

filter {
    grok {
        patterns_dir => "/data/logstash/patterns"
        match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{IPORHOST:syslog_server} %{SYSLOGPROG}: %{IP:client_ip}:%{INT:client_port} \[%{HAPROXYDATE:accept_date}\] %{NOTSPACE:frontend_name} %{NOTSPACE:backend_name}/%{NOTSPACE:server_name} %{INT:time_request}/%{INT:time_queue}/%{INT:time_backend_connect}/%{INT:time_backend_response}/%{NOTSPACE:time_duration} %{INT:http_status_code} %{NOTSPACE:bytes_read} %{FENG:captured_request_cookie} %{FENG:captured_response_cookie} %{NOTSPACE:termination_state} %{INT:actconn}/%{INT:feconn}/%{INT:beconn}/%{INT:srvconn}/%{NOTSPACE:retries} %{INT:srv_queue}/%{INT:backend_queue} \"%{WORD:verb} %{URIPATHPARAM:request} %{WORD:http_socke}/%{NUMBER:http_version}\"" }
    }
    geoip {
        source => "client_ip"
        fields => ["ip","city_name","country_name","location"]
        add_tag => ["geoip"]
    }
}

output {
        elasticsearch {
                hosts => ["es1:9200","es2:9200","es3:9200"]
                manage_template => true
                index => "logstash-feng.log-%{+YYYY-MM-dd}"
        }
}
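Two notes on this file. First, the grok pattern uses a custom FENG pattern from /data/logstash/patterns that the original post never shows; HAProxy writes the captured request/response cookies as either - or name=value, so a plausible definition (a hypothetical reconstruction, not the author's actual file) is:

FENG [^ ]*

Second, you can validate the config and then run Logstash with the standard 5.x flags (adjust the paths to your layout; the prompt above suggests the file lives in an etc/ directory):

bin/logstash -f etc/logstash.conf --config.test_and_exit
bin/logstash -f etc/logstash.conf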


Perform the following steps on es1:

Download Elasticsearch 5.5:

wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.5.0.tar.gz

Extract it:

tar -zxvf elasticsearch-5.5.0.tar.gz

mv elasticsearch-5.5.0 elasticsearch    # the rest of this post assumes it lives at /data/elasticsearch

 

Next, create a dedicated user, because Elasticsearch refuses to run as root (it technically can, but that requires extra configuration).

1. Set the JVM heap size (-Xms / -Xmx)

vim  /data/elasticsearch/config/jvm.options
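The 5.x defaults are 2 GB; whatever size fits your machine (the value below is an assumption), set both options to the same figure and keep the heap at no more than half of physical RAM:

-Xms2g
-Xmx2g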

2. Raise the virtual memory map limit

echo "vm.max_map_count=262144" >> /etc/sysctl.conf
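Load the new setting without waiting for a reboot:

sysctl -p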

3. Disable the swap partition

swapoff -a

Comment out the swap entry in /etc/fstab so it stays off after reboot:

#/dev/mapper/centos-swap swap           swap    defaults        0 0
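Verify that swap is gone:

free -m    # the Swap row should now show 0 total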

4. Create the elasticsearch user and set its password

useradd es

passwd es

 

5. Edit /etc/security/limits.conf and add:
es soft memlock unlimited
es hard memlock unlimited
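Elasticsearch 5.x's bootstrap checks also require at least 65536 open file descriptors; if startup fails with a "max file descriptors" error, add these lines as well (65536 is the documented minimum):

es soft nofile 65536
es hard nofile 65536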

6. Edit /etc/sysctl.conf and add:
vm.swappiness=0

Then reboot the machine.

chown -R es:es /data/elasticsearch

su - es

cd /data/elasticsearch/config/

Edit the configuration file:

vim elasticsearch.yml

cluster.name: senyint_elasticsearch
node.name: es1
path.data: /data/elasticsearch/data
path.logs: /data/elasticsearch/logs
bootstrap.memory_lock: false
network.host: 192.168.99.8
http.port: 9200
discovery.zen.ping.unicast.hosts: ["es1", "es2", "es3"]
http.cors.enabled: true
http.cors.allow-origin: "*"
xpack.security.enabled: false  # used once the X-Pack plugin is installed
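The same file is deployed on the other two nodes with node.name and network.host adjusted per node; for example on es2 (the address is an assumption, following es1's 192.168.99.8):

node.name: es2
network.host: 192.168.99.9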

Install X-Pack

In 5.x, the former standalone ES plugins (Shield, Watcher, Marvel, Graph, Reporting) are all bundled into the X-Pack component.

From the ES root directory (on every node), run bin/elasticsearch-plugin to install it:

bin/elasticsearch-plugin install x-pack

 

Install the head plugin:

# elasticsearch-head must NOT be placed under /data/elasticsearch/plugins/, or it will fail with errors

cd /data/elasticsearch/

git clone git://github.com/mobz/elasticsearch-head.git

 

As the root user:

cd /data/

wget https://nodejs.org/dist/v6.11.1/node-v6.11.1-linux-x64.tar.xz

xz -d node-v6.11.1-linux-x64.tar.xz

tar -xf node-v6.11.1-linux-x64.tar 

vi /etc/profile

export NODE_HOME=/data/node-v6.11.1-linux-x64

export PATH=${PATH}:${NODE_HOME}/bin

source /etc/profile

su -  es

cd /data/elasticsearch/elasticsearch-head/

npm install

vim /data/elasticsearch/elasticsearch-head/Gruntfile.js

 vim /data/elasticsearch/elasticsearch-head/_site/app.js
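The original post doesn't show what to change in these two files. For the stock elasticsearch-head repository the usual edits are the following (treat the exact lines as assumptions): in Gruntfile.js, add a hostname option under connect.server.options so grunt listens on all interfaces instead of only localhost; in _site/app.js, replace the hard-coded http://localhost:9200 with es1's address so the UI connects to the cluster by default.

// Gruntfile.js — inside connect.server.options
hostname: '*',

// _site/app.js — default connection address (line per head's stock source, assumed)
this.base_uri = this.config.base_uri || this.prefs.get("app-base_uri") || "http://192.168.99.8:9200";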

Start Elasticsearch:

su - es

/data/elasticsearch/bin/elasticsearch  -d

 

Start grunt:

cd /data/elasticsearch/elasticsearch-head/node_modules/grunt/bin

nohup ./grunt server &

Visit http://192.168.99.9:9100

 

Install Kibana

Kibana is a powerful graphical component for ES: over HTTP you can inspect the cluster's state, work with the data, and more.

The extension components Elastic currently recommends are Kibana and X-Pack, and X-Pack depends on Kibana.

Download: https://artifacts.elastic.co/downloads/kibana/kibana-5.5.0-linux-x86_64.tar.gz

Install Kibana on any one of the ES nodes, then edit its config:

vim config/kibana.yml
server.port: 5601
server.host: "192.168.10.116"
elasticsearch.url: "http://192.168.99.8:9200"
xpack.security.enabled: false 

From the Kibana root directory, run bin/kibana-plugin to install it:

bin/kibana-plugin install x-pack

Start Kibana

Before starting, the X-Pack security module must be disabled:

vim config/kibana.yml

vim config/elasticsearch.yml

Add the following line to both files:

xpack.security.enabled: false     # important
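With security disabled, start Kibana from its root directory (a typical background invocation for the tarball install):

nohup bin/kibana &

Then open http://192.168.10.116:5601, matching the server.host and server.port configured above.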

Nginx log format

    log_format logstash_json '{ "@timestamp": "$time_local", '
                         '"client": "$remote_addr", '
                         '"hostname": "$hostname", '
                         '"remote_user": "$remote_user", '
                         '"upstream_addr": "$upstream_addr", '
                         '"upstream_response_time": "$upstream_response_time", '
                         '"body_bytes_sent": "$body_bytes_sent", '
                         '"request_time": "$request_time", '
                         '"status": "$status", '
                         '"request": "$request", '
                         '"request_method": "$request_method", '
                         '"http_referrer": "$http_referer", '
                         '"body_bytes_sent":"$body_bytes_sent", '
                         '"http_x_forwarded_for": "$http_x_forwarded_for", '
                         '"http_user_agent": "$http_user_agent"  }';

    access_log  logs/access.log  logstash_json;
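After editing nginx.conf, check the syntax and reload so the new format takes effect:

nginx -t
nginx -s reload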

Filebeat is deployed on the nginx server to collect its logs:

filebeat.prospectors:
- input_type: log
  paths:
    - /data/nginx/logs/access.log 
  document_type: nginx_access
output.kafka:
  enabled: true
  hosts: ["kafka1:9092","kafka2:9092","kafka3:9092"]
  topic: logostash_%{[type]}

Logstash configuration that subscribes to the Kafka topic and stores the events in ES:

input {
        kafka {
                bootstrap_servers => "kafka1:9092,kafka2:9092,kafka3:9092"
                topics => ["logostash__nginx_access"]
                consumer_threads => 5
                decorate_events => true
                codec => "json"
                }
}


filter {
    json {
        source => "message"
        remove_field => ["message","@timestamp","beat","type","kafka","domain","serverip","url","@version","offset","input_type","count","source","fields","beat.hostname","host","tags"]
    }
}
      
output {
        elasticsearch {
        hosts => ["es1:9200","es2:9200","es3:9200"]
        manage_template => true
        index => "logstash-nginx.log-%{+YYYY-MM-dd}"
        }
}
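Once events flow end to end, you can confirm that the daily indices are being created by querying any ES node directly (es1's address taken from the config above):

curl 'http://192.168.99.8:9200/_cat/indices?v'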
                             
Original article: https://www.cnblogs.com/fengjian2016/p/7216778.html