Deploying an ELK Log Analysis System on CentOS 7.6
Download Elasticsearch
Create the elk user and grant it ownership of the directories:
useradd elk
chown -R elk:elk /home/elk/elasticsearch
chown -R elk:elk /home/elk/elasticsearch1
chown -R elk:elk /home/elk/elasticsearch2
mkdir -p /home/eladata
mkdir -p /var/log/elk
chown -R elk:elk /home/eladata
chown -R elk:elk /var/log/elk
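If Elasticsearch later refuses to start because of its bootstrap checks (vm.max_map_count, max file descriptors), the usual CentOS 7 fixes look like the sketch below. The values are the common minimums; the systemd units further down already set LimitNOFILE/LimitNPROC, so the limits.conf entries mainly matter for the manual su-based starts.
# kernel setting required by the Elasticsearch bootstrap check
echo "vm.max_map_count=262144" >> /etc/sysctl.conf
sysctl -p
# file-descriptor / process limits for the elk user (re-login to apply)
cat >> /etc/security/limits.conf <<EOF
elk soft nofile 65536
elk hard nofile 65536
elk soft nproc 4096
elk hard nproc 4096
EOF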
Master node
Extract Elasticsearch and edit the configuration file:
/home/elk/elasticsearch/config
[root@localhost config]# grep -v "^#" elasticsearch.yml
cluster.name: my-application
node.name: node0
node.master: true
node.attr.rack: r1
node.max_local_storage_nodes: 3
path.data: /home/eladata
path.logs: /var/log/elk
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 192.168.1.70
http.port: 9200
transport.tcp.port: 9301
discovery.zen.minimum_master_nodes: 1
cluster.initial_master_nodes: ["node0"]
Manual start command:
su elk -l -c '/home/elk/elasticsearch/bin/elasticsearch -d'
systemd unit file: elasticsearch.service
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat elasticsearch.service
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
RuntimeDirectory=elasticsearch
PrivateTmp=true
Environment=ES_HOME=/home/elk/elasticsearch
Environment=ES_PATH_CONF=/home/elk/elasticsearch/config
Environment=PID_DIR=/var/run/elasticsearch
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch
User=elk
Group=elk
ExecStart=/home/elk/elasticsearch/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65536
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target
[root@localhost system]#
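With the unit file in place, the node can be managed through systemd instead of the su command above; the same steps apply to elasticsearch1.service and elasticsearch2.service below:
systemctl daemon-reload
systemctl enable elasticsearch.service
systemctl start elasticsearch.service
systemctl status elasticsearch.service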
Node1
/home/elk/elasticsearch1/config
[root@localhost config]# grep -v "^#" elasticsearch.yml
cluster.name: my-application
node.name: node1
node.master: false
node.attr.rack: r1
node.max_local_storage_nodes: 3
path.data: /home/eladata
path.logs: /var/log/elk
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 192.168.1.70
transport.tcp.port: 9303
http.port: 9302
discovery.zen.ping.unicast.hosts: ["192.168.1.70:9301"]
[root@localhost config]#
Manual start command:
su elk -l -c '/home/elk/elasticsearch1/bin/elasticsearch -d'
systemd unit file: elasticsearch1.service
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat elasticsearch1.service
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
RuntimeDirectory=elasticsearch1
PrivateTmp=true
Environment=ES_HOME=/home/elk/elasticsearch1
Environment=ES_PATH_CONF=/home/elk/elasticsearch1/config
Environment=PID_DIR=/var/run/elasticsearch
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch1
User=elk
Group=elk
ExecStart=/home/elk/elasticsearch1/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65536
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target
[root@localhost system]#
Node2
/home/elk/elasticsearch2/config
[root@localhost config]# grep -v "^#" elasticsearch.yml
cluster.name: my-application
node.name: node2
node.attr.rack: r1
node.master: false
node.max_local_storage_nodes: 3
path.data: /home/eladata
path.logs: /var/log/elk
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 192.168.1.70
http.port: 9203
transport.tcp.port: 9304
discovery.zen.ping.unicast.hosts: ["192.168.1.70:9301"]
discovery.zen.minimum_master_nodes: 1
[root@localhost config]#
Manual start command:
su elk -l -c '/home/elk/elasticsearch2/bin/elasticsearch -d'
systemd unit file: elasticsearch2.service
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat elasticsearch2.service
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
RuntimeDirectory=elasticsearch2
PrivateTmp=true
Environment=ES_HOME=/home/elk/elasticsearch2
Environment=ES_PATH_CONF=/home/elk/elasticsearch2/config
Environment=PID_DIR=/var/run/elasticsearch
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch2
User=elk
Group=elk
ExecStart=/home/elk/elasticsearch2/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65536
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target
[root@localhost system]#
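Once all three nodes are running, confirm that they have joined the same cluster by querying the master's HTTP port, for example:
curl 'http://192.168.1.70:9200/_cat/nodes?v'
curl 'http://192.168.1.70:9200/_cluster/health?pretty'
The node list should show node0, node1 and node2, and cluster health should be green (or yellow until replicas are allocated).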
Download Logstash
The directory layout is as follows; the default configuration can be used as-is:
[root@localhost logstash]# pwd
/home/elk/logstash
[root@localhost logstash]#
Manual start commands:
./logstash -f ../dev.conf
nohup ./logstash -f ../dev.conf &
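Before backgrounding Logstash it is worth syntax-checking the pipeline file first (dev.conf is shown later in this article):
./logstash -f ../dev.conf --config.test_and_exit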
Download Kibana
The configuration file is as follows:
[root@localhost config]# pwd
/home/elk/kibana/config
[root@localhost config]# grep -v "^#" kibana.yml
server.host: "192.168.1.70"
elasticsearch.hosts: ["http://192.168.1.70:9200"]
kibana.index: ".kibana"
i18n.locale: "zh-CN"
Manual start commands:
./kibana
nohup ./kibana &
Kibana systemd unit file:
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat kibana.service
[Unit]
Description=Kibana Server Manager
[Service]
ExecStart=/home/elk/kibana/bin/kibana
[Install]
WantedBy=multi-user.target
[root@localhost system]#
Kibana listens on port 5601; access it at 192.168.1.70:5601.
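As with the other services, run systemctl daemon-reload before the first start; besides opening the browser, a quick way to confirm Kibana is up is its status API:
systemctl daemon-reload
systemctl enable kibana.service
systemctl start kibana.service
curl 'http://192.168.1.70:5601/api/status'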
Install elasticsearch-head
yum install git npm
git clone https://github.com/mobz/elasticsearch-head.git
[root@localhost elasticsearch-head]# pwd
/home/elk/elasticsearch-head
[root@localhost elasticsearch-head]#
Start:
npm install
npm run start
nohup npm run start &
Create a test index (index creation goes through the Elasticsearch HTTP port, 9200):
curl -XPUT 'http://192.168.1.70:9200/book'
Then open 192.168.1.70:9100 in a browser to reach the head UI; the http.cors.* settings added to elasticsearch.yml above are what allow head to connect to the cluster.
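The test index can also be confirmed directly against Elasticsearch:
curl 'http://192.168.1.70:9200/_cat/indices?v'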
Download Kafka
Edit the configuration file as follows:
[root@localhost config]# pwd
/home/elk/kafka/config
[root@localhost config]# grep -v "^#" server.properties
broker.id=0
listeners=PLAINTEXT://192.168.1.70:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/var/log/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=localhost:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
delete.topic.enable=true
[root@localhost config]#
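server.properties points log.dirs at /var/log/kafka-logs; Kafka will normally create it on first start, but creating it up front (and adjusting ownership if Kafka is ever run as a non-root user) avoids surprises:
mkdir -p /var/log/kafka-logs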
Start the ZooKeeper bundled with Kafka
Manual start:
[root@localhost bin]# pwd
/home/elk/kafka/bin
[root@localhost bin]#
./zookeeper-server-start.sh ../config/zookeeper.properties
Starting ZooKeeper with systemd:
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat zookeeper.service
[Service]
Type=forking
SyslogIdentifier=zookeeper
Restart=always
RestartSec=0s
ExecStart=/home/elk/kafka/bin/zookeeper-server-start.sh -daemon /home/elk/kafka/config/zookeeper.properties
ExecStop=/home/elk/kafka/bin/zookeeper-server-stop.sh
[root@localhost system]#
Start the Kafka service
Manual start:
./kafka-server-start.sh ../config/server.properties
Starting Kafka with systemd:
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat kafka.service
[Unit]
Description=Apache kafka
After=network.target
[Service]
Type=simple
Restart=always
RestartSec=0s
ExecStart=/home/elk/kafka/bin/kafka-server-start.sh /home/elk/kafka/config/server.properties
ExecStop=/home/elk/kafka/bin/kafka-server-stop.sh
[root@localhost system]#
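ZooKeeper must be running before Kafka is started. With both unit files in place the sequence is as follows, ending with a check that both ports are listening (note that the zookeeper.service above has no [Install] section, so it can be started but not enabled at boot until one is added):
systemctl daemon-reload
systemctl start zookeeper
systemctl start kafka
ss -lntp | grep -E '2181|9092'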
Test Kafka
Create a topic named test:
./kafka-topics.sh --create --zookeeper 192.168.1.70:2181 --replication-factor 1 --partitions 1 --topic test
List the topics in Kafka:
./kafka-topics.sh --list --zookeeper 192.168.1.70:2181
Produce messages to the test topic:
./kafka-console-producer.sh --broker-list 192.168.1.70:9092 --topic test
Consume messages from the test topic:
./kafka-console-consumer.sh --bootstrap-server 192.168.1.70:9092 --topic test --from-beginning
If the messages typed into the producer show up on the consumer side, Kafka is working.
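The Logstash input later in this article reads from a topic named filebeat-log; it can be created the same way as the test topic (depending on broker settings, Kafka may also auto-create it on the first write from Filebeat):
./kafka-topics.sh --create --zookeeper 192.168.1.70:2181 --replication-factor 1 --partitions 1 --topic filebeat-log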
Install Filebeat on the target machine
Install version 6.5:
[root@localhost filebeat]# pwd
/usr/local/filebeat
[root@localhost filebeat]# cat filebeat.yml
filebeat.prospectors:
- type: log
  paths:
    - /opt/logs/workphone-tcp/catalina.out
  fields:
    tag: 54_tcp_catalina_out
- type: log
  paths:
    - /opt/logs/workphone-webservice/catalina.out
  fields:
    tag: 54_web_catalina_out

name: 192.168.1.54

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

setup.template.settings:
  index.number_of_shards: 3

output.kafka:
  hosts: ["192.168.1.70:9092"]
  topic: "filebeat-log"
  partition.hash:
    reachable_only: true
  compression: gzip
  max_message_bytes: 1000000
  required_acks: 1
[root@localhost filebeat]#
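Start Filebeat in the foreground first to catch configuration errors, then background it, in the same style as the other components:
cd /usr/local/filebeat
./filebeat -e -c filebeat.yml
nohup ./filebeat -c filebeat.yml &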
After Filebeat is installed, go back to Logstash and edit its pipeline configuration.
Logstash configuration
[root@localhost logstash]# pwd
/home/elk/logstash
[root@localhost logstash]# cat dev.conf
input {
  kafka {
    bootstrap_servers => "192.168.1.70:9092"
    topics => ["filebeat-log"]
    codec => "json"
  }
}

filter {
  if [fields][tag] == "jpwebmap" {
    json {
      source => "message"
      remove_field => "message"
    }
    geoip {
      source => "client"
      target => "geoip"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
    }
    mutate {
      convert => [ "[geoip][coordinates]", "float" ]
    }
  }

  if [fields][tag] == "54_tcp_catalina_out" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }

  if [fields][tag] == "54_web_catalina_out" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }

  if [fields][tag] == "55_tcp_catalina_out" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }

  if [fields][tag] == "55_web_catalina_out" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }

  if [fields][tag] == "51_nginx80_access_log" {
    mutate {
      add_field => { "spstr" => "%{[log][file][path]}" }
    }
    mutate {
      split => ["spstr", "/"]
      # keep the last element of the path (the file name) as src
      add_field => ["src", "%{[spstr][-1]}"]
    }
    mutate {
      remove_field => [ "friends", "ecs", "agent", "spstr" ]
    }
    grok {
      match => { "message" => "%{IPORHOST:remote_addr} - %{DATA:remote_user} \[%{HTTPDATE:time}\] \"%{WORD:method} %{DATA:url} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent:bytes} \"%{DATA:referrer}\" \"%{DATA:agent}\" \"%{DATA:x_forwarded_for}\" \"%{NUMBER:request_time}\" \"%{DATA:upstream_addr}\" \"%{DATA:upstream_status}\"" }
      remove_field => "message"
    }
    date {
      match => ["time", "dd/MMM/yyyy:HH:mm:ss Z"]
      target => "@timestamp"
    }
    geoip {
      source => "x_forwarded_for"
      target => "geoip"
      database => "/home/elk/logstash/GeoLite2-City.mmdb"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
    }
    mutate {
      convert => [ "[geoip][coordinates]", "float" ]
    }
  }
}

output {
  if [fields][tag] == "wori" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "zabbix"
    }
  }
  if [fields][tag] == "54_tcp_catalina_out" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "54_tcp_catalina_out"
    }
  }
  if [fields][tag] == "54_web_catalina_out" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "54_web_catalina_out"
    }
  }
  if [fields][tag] == "55_tcp_catalina_out" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "55_tcp_catalina_out"
    }
  }
  if [fields][tag] == "55_web_catalina_out" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "55_web_catalina_out"
    }
  }
  if [fields][tag] == "51_nginx80_access_log" {
    stdout {}
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "51_nginx80_access_log"
    }
  }
}
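After Logstash is restarted with this dev.conf and the Tomcat logs start flowing, the per-tag indices (54_tcp_catalina_out, 54_web_catalina_out, and so on) should appear in Elasticsearch; a quick check:
curl 'http://192.168.1.70:9200/_cat/indices/5*?v'
curl 'http://192.168.1.70:9200/54_tcp_catalina_out/_count?pretty'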
Other configuration files
index.conf
filter {
  mutate {
    add_field => { "spstr" => "%{[log][file][path]}" }
  }
  mutate {
    split => ["spstr", "/"]
    # keep the last element of the path (the file name) as src
    add_field => ["src", "%{[spstr][-1]}"]
  }
  mutate {
    remove_field => [ "friends", "ecs", "agent", "spstr" ]
  }
}
java.conf
filter {
  if [fields][tag] == "java" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  } # end if
}
kafkainput.conf
input {
  kafka {
    bootstrap_servers => "172.16.11.68:9092"
    #topics => ["ql-prod-tomcat" ]
    topics => ["ql-prod-dubbo","ql-prod-nginx","ql-prod-tomcat" ]
    codec => "json"
    consumer_threads => 5
    decorate_events => true
    #auto_offset_reset => "latest"
    group_id => "logstash"
    #client_id => ""
    ############################# HELK Optimizing Latency #############################
    fetch_min_bytes => "1"
    request_timeout_ms => "305000"
    ############################# HELK Optimizing Availability #############################
    session_timeout_ms => "10000"
    max_poll_records => "550"
    max_poll_interval_ms => "300000"
  }
}
#input {
# kafka{
# bootstrap_servers => "172.16.11.68:9092"
# topics => ["ql-prod-java-dubbo","ql-prod","ql-prod-java" ]
# codec => "json"
# consumer_threads => 15
# decorate_events => true
# auto_offset_reset => "latest"
# group_id => "logstash-1"
# ############################# HELK Optimizing Latency #############################
# fetch_min_bytes => "1"
# request_timeout_ms => "305000"
# ############################# HELK Optimizing Availability #############################
# session_timeout_ms => "10000"
# max_poll_records => "550"
# max_poll_interval_ms => "300000"
# }
#}
nginx.conf
filter {
  if [fields][tag] == "nginx-access" {
    mutate {
      add_field => { "spstr" => "%{[log][file][path]}" }
    }
    mutate {
      split => ["spstr", "/"]
      # keep the last element of the path (the file name) as src
      add_field => ["src", "%{[spstr][-1]}"]
    }
    mutate {
      remove_field => [ "friends", "ecs", "agent", "spstr" ]
    }
    grok {
      match => { "message" => "%{IPORHOST:remote_addr} - %{DATA:remote_user} \[%{HTTPDATE:time}\] \"%{WORD:method} %{DATA:url} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent:bytes} \"%{DATA:referrer}\" \"%{DATA:agent}\" \"%{DATA:x_forwarded_for}\" \"%{NUMBER:request_time}\" \"%{DATA:upstream_addr}\" \"%{DATA:upstream_status}\"" }
      remove_field => "message"
    }
    date {
      match => ["time", "dd/MMM/yyyy:HH:mm:ss Z"]
      target => "@timestamp"
    }
    geoip {
      source => "x_forwarded_for"
      target => "geoip"
      database => "/opt/logstash-6.2.4/GeoLite2-City.mmdb"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
    }
    mutate {
      convert => [ "[geoip][coordinates]", "float" ]
    }
  } # end if
}
output.conf
output {
  if [fields][tag] == "nginx-access" {
    stdout {}
    elasticsearch {
      user => elastic
      password => WR141bp2sveJuGFaD4oR
      hosts => ["172.16.11.67:9200"]
      index => "logstash-%{[fields][proname]}-%{+YYYY.MM.dd}"
    }
  }
  #stdout{}
  if [fields][tag] == "java" {
    elasticsearch {
      user => elastic
      password => WR141bp2sveJuGFaD4oR
      hosts => ["172.16.11.66:9200","172.16.11.68:9200"]
      index => "%{[host][name]}-%{[src]}"
    }
  }
}
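These extra files (kafkainput.conf, index.conf, java.conf, nginx.conf, output.conf) are meant to be kept as separate pipeline fragments rather than merged into a single dev.conf. Logstash concatenates every file in a directory when -f points at one; the conf.d path below is only an example location:
./logstash -f /home/elk/logstash/conf.d/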