自动化运维之日志系统Logstash解耦实践(八) - youngperson/study-100 GitHub Wiki
1、Logstash日志整合
1.将所有需要收集的日志写入一个配置文件,发送至node4的Redis服务(以下配置文件在各个节点上)。
# cd /etc/logstash/conf.d
vi input_file_output_redis.conf
input {
  # System logs received over the syslog protocol (UDP/TCP 514).
  syslog {
    type => "system_rsyslog"
    host => "192.168.44.12"
    port => 514
  }
  # Elasticsearch (Java) log file. The multiline codec joins any line
  # that does NOT start with "[" onto the previous event, so stack
  # traces stay attached to their "[timestamp]"-prefixed log line.
  file {
    type => "elasticsearch"
    # NOTE(review): "elk-cluter" looks like a typo for "elk-cluster" —
    # confirm this matches the actual log filename on disk.
    path => "/var/log/elasticsearch/elk-cluter.log"
    codec => multiline {
      pattern => "^\["
      negate => true
      what => "previous"
    }
  }
  # Nginx access log written in JSON format.
  file {
    type => "access_nginx"
    path => "/var/log/nginx/access_json.log"
  }
  # Arbitrary events pushed over a raw TCP socket; Logstash listens
  # as the server side of the connection.
  tcp {
    type => "tcp"
    host => "192.168.44.12"
    port => 6666
    mode => "server"
  }
}
output {
  # Fan each event out to its own Redis list on node4 (192.168.44.13),
  # selected by the [type] field set in the input section.
  if [type] == "system_rsyslog" {
    redis {
      host => "192.168.44.13"
      port => 6379
      data_type => "list"
      key => "system_rsyslog"
    }
  }
  if [type] == "elasticsearch" {
    redis {
      host => "192.168.44.13"
      port => 6379
      data_type => "list"
      key => "access_es_log"
    }
  }
  if [type] == "access_nginx" {
    redis {
      host => "192.168.44.13"
      port => 6379
      data_type => "list"
      key => "access_nginx"
    }
  }
  if [type] == "tcp" {
    redis {
      host => "192.168.44.13"
      port => 6379
      data_type => "list"
      key => "tcp_log"
    }
  }
}
2.运行
# /opt/logstash/bin/logstash -f /etc/logstash/conf.d/input_file_output_redis.conf &
3.测试
# 按照之前的文章去测试,看数据
2、将Redis消息队列收集的所有日志,写入Elasticsearch集群
1.配置
cd /etc/logstash/conf.d
vi input_redis_output_es.conf
input {
  # Drain each Redis list populated by the shipper nodes; the type
  # assigned here mirrors the type used when the event was queued.
  # System logs.
  redis {
    type => "system_rsyslog"
    host => "192.168.44.13"
    port => 6379
    data_type => "list"
    key => "system_rsyslog"
  }
  # Elasticsearch (Java) logs.
  redis {
    type => "elasticsearch"
    host => "192.168.44.13"
    port => 6379
    data_type => "list"
    key => "access_es_log"
  }
  # Nginx access logs.
  redis {
    type => "access_nginx"
    host => "192.168.44.13"
    port => 6379
    data_type => "list"
    key => "access_nginx"
  }
  # Raw TCP events.
  redis {
    type => "tcp"
    host => "192.168.44.13"
    port => 6379
    data_type => "list"
    key => "tcp_log"
  }
}
output {
  # Write each event type to its own daily-rotated index on the
  # three-node Elasticsearch cluster.
  if [type] == "system_rsyslog" {
    elasticsearch {
      hosts => ["192.168.44.10:9200","192.168.44.11:9200", "192.168.44.12:9200"]
      index => "system_rsyslog_%{+YYYY.MM.dd}"
    }
  }
  if [type] == "elasticsearch" {
    elasticsearch {
      hosts => ["192.168.44.10:9200","192.168.44.11:9200", "192.168.44.12:9200"]
      index => "access_es_%{+YYYY.MM.dd}"
    }
  }
  if [type] == "access_nginx" {
    elasticsearch {
      hosts => ["192.168.44.10:9200","192.168.44.11:9200", "192.168.44.12:9200"]
      index => "access_nginx_%{+YYYY.MM.dd}"
    }
  }
  if [type] == "tcp" {
    elasticsearch {
      hosts => ["192.168.44.10:9200","192.168.44.11:9200", "192.168.44.12:9200"]
      index => "tcp_log_%{+YYYY.MM.dd}"
    }
  }
}
2.运行
# /opt/logstash/bin/logstash -f /etc/logstash/conf.d/input_redis_output_es.conf &
3.测试
制造点数据,查看node1上的监控工具和kibana,并创建索引
创建的kibana索引和配置文件中的index保持一致才行
logstash中的index => "tcp_log_%{+YYYY.MM.dd}" 对应着kibana的[tcp_log_]YYYY.MM.DD