ELK Integrated Installation and Configuration

Three virtual machines: 192.168.6.193, 192.168.6.194, 192.168.6.195; the local machine is 192.168.6.78.

Pipeline: pythonserver -> nginx -> logstash_shipper -> kafka -> logstash_index -> es -> kibana

The Python server sends logs over TCP/UDP to Kafka.

Logs produced by nginx are collected by logstash_shipper and sent to Kafka.

logstash_index reads the data from Kafka, filters it, and ships it to the ES cluster.

Kibana visualizes the data stored in ES.

Installation and Configuration Notes
1 Copy the original lib files

mkdir -p /home/eamon/elk
cd /home/eamon/elk/
scp -r eamon@192.168.6.78:/home/eamon/study/elk/lib .

2 Configure the JDK (JAVA_HOME)
vim /etc/environment

JAVA_HOME="/home/eamon/elk/lib/jdk1.8.0_60"
PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:$JAVA_HOME/bin"

Verify:
$ source /etc/environment
$ java -version
$ javac -version
$ echo $JAVA_HOME

3 Configure nginx
Install:
# sudo apt-get install nginx
Strongly recommended: enable the buffer parameter on Nginx's access_log directive; it gives a big boost to peak response performance (see the example after the log_format definition below). (TODO: verify)

# sudo vim nginx.conf

Log format definition:
log_format main '$remote_addr - $remote_user [$time_local] '
                '"$request" $status $body_bytes_sent '
                '"$http_referer" "$http_user_agent" ';

Reverse proxy:
# sudo vim sites-enabled/helloconf

upstream t {
    server 127.0.0.1:8005 weight=5;
}

server {
    listen 80;
    server_name 192.168.6.194;

    location / {
        proxy_pass http://t;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_cache_valid any 1m;
    }
}
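
A quick sanity check of the proxy, assuming the Python backend is already listening on 127.0.0.1:8005:

$ curl -i http://192.168.6.194/

The response should come from the upstream t backend rather than from nginx itself.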

4 Configure Logstash (logstash-1.5.4; requires JDK 8)
Test:
# bin/logstash -e 'input{stdin{}}output{stdout{codec=>rubydebug}}'
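
Typing a line such as hello should echo back a structured event; the exact field values below are illustrative:

{
       "message" => "hello",
      "@version" => "1",
    "@timestamp" => "2015-10-21T08:00:00.000Z",
          "host" => "elk-node"
}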

index.conf------------------------------

input {
    kafka {
        zk_connect => "localhost:2181"
        group_id => "logstash"
        topic_id => "test"
        codec => plain
        reset_beginning => false
        consumer_threads => 5
        decorate_events => true
    }
}

filter {
    grok {
        match => [ "message", "%{IPORHOST:source_ip} - %{USERNAME:remote_user} \[%{HTTPDATE:timestamp}\] %{QS:request} %{INT:status} %{INT:body_bytes_sent} %{QS:http_referer} %{QS:http_user_agent}" ]
    }
}


output {
    elasticsearch {
        host => "192.168.6.194"
        protocol => "http"
        workers => 5
        index => "logstash-%{type}-%{+YYYY.MM.dd}"
        document_type => "%{type}"
        template_overwrite => false
    }
}

shipper.conf-------------------------------

input {
    file {
        path => "/var/log/nginx/*.log"
        # start_position => "beginning"
    }
}

filter {
    if [path] =~ "access" {
        mutate { replace => { type => "access" } }
    } else if [path] =~ "error" {
        mutate { replace => { type => "error" } }
    } else {
        mutate { replace => { type => "random_logs" } }
    }
    grok {
        match => [ "message", "%{IPORHOST:source_ip} - %{USERNAME:remote_user} \[%{HTTPDATE:timestamp}\] %{QS:request} %{INT:status} %{INT:body_bytes_sent} %{QS:http_referer} %{QS:http_user_agent}" ]
    }
}

output {
    stdout {
        codec => rubydebug
    }

    kafka {
        broker_list => "localhost:9092"
        topic_id => "test"
        compression_codec => "snappy"
        workers => 1
    }
}
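
With both files saved, each Logstash role runs as a separate process (file names as used above; paths relative to the Logstash install directory):

$ bin/logstash -f shipper.conf
$ bin/logstash -f index.conf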

tcp/udp input -----------------------------------------
input {
    tcp {
        port => 5000
        type => syslog
    }
    udp {
        port => 5000
        type => syslog
    }
}
Test: telnet localhost 5000
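
For the pythonserver side of the pipeline, a minimal sketch of a sender in Python, assuming the tcp/udp inputs above listen on port 5000 of the Logstash host (the host address and messages are illustrative):

import socket

LOGSTASH_HOST = "192.168.6.194"  # assumption: the VM running the Logstash tcp/udp inputs
LOGSTASH_PORT = 5000

def send_udp(line):
    # one log line per UDP datagram; fire-and-forget
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.sendto(line.encode("utf-8"), (LOGSTASH_HOST, LOGSTASH_PORT))
    sock.close()

def send_tcp(line):
    # the tcp input splits events on newlines, so terminate each line
    sock = socket.create_connection((LOGSTASH_HOST, LOGSTASH_PORT))
    sock.sendall((line + "\n").encode("utf-8"))
    sock.close()

if __name__ == "__main__":
    send_udp("hello from pythonserver over udp")
    send_tcp("hello from pythonserver over tcp")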

5 Configure ES (elasticsearch-1.7.2)
Change the cluster name so it is unique within the network segment; all nodes on the same segment with the same cluster name automatically form one cluster.
# vim config/elasticsearch.yml
cluster.name: eamones
Test:
http://192.168.6.194:9200/_count?pretty
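
For example (a fresh cluster reports zero documents; the numbers are illustrative):

$ curl 'http://192.168.6.194:9200/_count?pretty'
{
  "count" : 0,
  "_shards" : {
    "total" : 0,
    "successful" : 0,
    "failed" : 0
  }
}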

6 Configure Kafka (kafka_2.10-0.8.2.2)

6.1 Configure ZooKeeper
$ vim config/zookeeper.properties

tickTime=2000
dataDir=/data/zookeeper/
clientPort=2181
initLimit=5
syncLimit=2
server.1=192.168.0.10:2888:3888
server.2=192.168.0.11:2888:3888
server.3=192.168.0.12:2888:3888
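
Note: with a server.N list like this, ZooKeeper also expects a myid file inside dataDir whose content matches each node's N. On server.1 (use 2 and 3 on the other machines):

$ echo 1 > /data/zookeeper/myid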

# vim config/server.properties
broker.id=1
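
Start ZooKeeper first and then the broker, using the scripts shipped with the Kafka distribution:

$ bin/zookeeper-server-start.sh config/zookeeper.properties
$ bin/kafka-server-start.sh config/server.properties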
Set up and test Kafka:
# create "logstash_logs" topic
$ bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic logstash_logs
$ bin/kafka-topics.sh --list --zookeeper localhost:2181
$ bin/logstash -e "input { stdin {} } output { kafka { topic_id => 'logstash_logs' } }"
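
To confirm that events actually land in Kafka, tail the topic with the console consumer (--zookeeper is the 0.8.x-era flag):

$ bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic logstash_logs --from-beginning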

7 Configure Kibana

Edit kibana.yml and point Kibana at the Elasticsearch node via the elasticsearch URL setting, as sketched below.
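
A minimal sketch of the relevant line, assuming Kibana 4.x (the generation contemporary with elasticsearch-1.7.2), where the setting is named elasticsearch_url:

elasticsearch_url: "http://192.168.6.194:9200"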

Test: http://192.168.6.194:5601/

Original article: https://www.cnblogs.com/zhangeamon/p/4897350.html