# ==================================================================ELK environment preparation

# Raise the open-file limits
# * stands for all Linux user names; save, exit, and log back in for the change to take effect.
vi /etc/security/limits.conf

* soft nofile 65536
* hard nofile 65536
* soft nproc 2048
* hard nproc 4096
* soft memlock unlimited
* hard memlock unlimited

# Raise the process limit
# Adjust to the following settings
vi /etc/security/limits.d/20-nproc.conf

* soft nproc 4096
root soft nproc unlimited

# Adjust virtual memory & maximum concurrent connections
vi /etc/sysctl.conf

vm.max_map_count=655360
fs.file-max=655360

# Then apply with:
sysctl -p

shutdown -h now
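# After the reboot and re-login, a quick sanity check of the new limits
# (a minimal sketch; run as a non-root user):
ulimit -n               # expect 65536
ulimit -u               # expect 4096 (from 20-nproc.conf)
sysctl vm.max_map_count # expect 655360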
# VM snapshot: before elk
# ==================================================================Install elasticsearch
tar -zxvf ~/elasticsearch-6.2.4.tar.gz -C /usr/local
rm -r ~/elasticsearch-6.2.4.tar.gz

# ==================================================================Install logstash
tar -zxvf ~/logstash-6.2.4.tar.gz -C /usr/local
rm -r ~/logstash-6.2.4.tar.gz

# ==================================================================Install kibana
tar -zxvf ~/kibana-6.2.4-linux-x86_64.tar.gz -C /usr/local
mv /usr/local/kibana-6.2.4-linux-x86_64 /usr/local/kibana-6.2.4
rm -r ~/kibana-6.2.4-linux-x86_64.tar.gz
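# Quick check that all three directories landed in /usr/local:
ls -d /usr/local/elasticsearch-6.2.4 /usr/local/logstash-6.2.4 /usr/local/kibana-6.2.4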

# Environment variables
# ==================================================================node1 node2 node3

vi /etc/profile

# Add the following below the line "export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL"

export JAVA_HOME=/usr/java/jdk1.8.0_111
export JRE_HOME=/usr/java/jdk1.8.0_111/jre
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export SCALA_HOME=/usr/local/scala-2.12.4
export KAFKA_HOME=/usr/local/kafka_2.12-0.10.2.1
export FLUME_HOME=/usr/local/flume-1.8.0
export SPARK_HOME=/usr/local/spark-2.3.0
export STORM_HOME=/usr/local/storm-1.1.0
export REDIS_HOME=/usr/local/redis-4.0.2
export ERLANG_HOME=/usr/local/erlang
export RABBITMQ_HOME=/usr/local/rabbitmq_server-3.7.5
export MONGODB_HOME=/usr/local/mongodb-3.4.5
export NGINX_HOME=/usr/local/nginx
export CATALINA_BASE=/usr/local/tomcat
export CATALINA_HOME=/usr/local/tomcat
export TOMCAT_HOME=/usr/local/tomcat
export KEEPALIVED_HOME=/usr/local/keepalived
export ELASTICSEARCH_HOME=/usr/local/elasticsearch-6.2.4
export LOGSTASH_HOME=/usr/local/logstash-6.2.4
export KIBANA_HOME=/usr/local/kibana-6.2.4

export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin:$FLUME_HOME/bin:$SPARK_HOME/bin:$STORM_HOME/bin:$REDIS_HOME/bin:$ERLANG_HOME/bin:$RABBITMQ_HOME/ebin:$RABBITMQ_HOME/sbin:$MONGODB_HOME/bin:$NGINX_HOME/sbin:$CATALINA_HOME/bin:$KEEPALIVED_HOME/sbin:$ELASTICSEARCH_HOME/bin:$LOGSTASH_HOME/bin:$KIBANA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
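# The block above goes into /etc/profile on all three nodes; if it was edited
# only on node1, one way to distribute it (a sketch; the nodes here are identical):
scp /etc/profile node2:/etc/profile
scp /etc/profile node3:/etc/profile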

# ==================================================================node1

# Make the environment variables take effect
source /etc/profile

# Check the result
echo $ELASTICSEARCH_HOME
echo $LOGSTASH_HOME
echo $KIBANA_HOME

# Elasticsearch, Logstash, and Kibana cannot be run as the root account,
# but Linux limits how many files and threads a non-root account may use concurrently,
# so create a dedicated account:
# useradd elk (user name) -g elk (group name) -p 123456 (password)
groupadd elk
useradd elk -g elk -p 123456
chown -R elk:elk $ELASTICSEARCH_HOME/
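# Note: useradd -p expects an already-encrypted password, so the command above does not
# literally set the password to 123456; on CentOS 7 it can be set explicitly with:
echo 123456 | passwd --stdin elk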
# Switch to the elk account
su - elk

# Create the Elasticsearch home, data, and log directories
mkdir $ELASTICSEARCH_HOME/data
# mkdir $ELASTICSEARCH_HOME/logs
# chown -R elk:elk $ELASTICSEARCH_HOME/data
# chown -R elk:elk $ELASTICSEARCH_HOME/logs
# ll $ELASTICSEARCH_HOME

# Edit the elasticsearch configuration file
vi $ELASTICSEARCH_HOME/config/elasticsearch.yml

# Cluster name
cluster.name: es_cluster
# Node name
node.name: node1
# Node host/IP
network.host: node1
http.port: 9200
# TCP transport port
transport.tcp.port: 9300
# Data directory
path.data: /usr/local/elasticsearch-6.2.4/data
# Log directory
path.logs: /usr/local/elasticsearch-6.2.4/logs
# Whether this node may be elected master
node.master: true
# Whether this node stores data
node.data: true
# Cluster multicast discovery
# discovery.zen.ping.multicast.enabled: true
# Initial list of master-eligible hosts that a starting node (master or data) probes to join the cluster
discovery.zen.ping.unicast.hosts: ["node1","node2","node3"]
# Minimum master-eligible nodes required to form a cluster; for 3 master-eligible nodes the quorum is 3/2 + 1 = 2
discovery.zen.minimum_master_nodes: 2
# Avoid cross-origin (CORS) problems
http.cors.enabled: true
http.cors.allow-origin: "*"

su - root

scp -r $ELASTICSEARCH_HOME node2:/usr/local/
scp -r $ELASTICSEARCH_HOME node3:/usr/local/
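# Note: if elasticsearch was ever started on node1 before this scp, the copied data
# directory carries node1's cluster state; in that case clear it on node2/node3 first:
# rm -rf $ELASTICSEARCH_HOME/data/*   (run on node2/node3 only)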

# ==================================================================node2 node3

# Make the environment variables take effect
source /etc/profile

# Check the result
echo $ELASTICSEARCH_HOME

# ==================================================================node2

groupadd elk
useradd elk -g elk -p 123456
chown -R elk:elk $ELASTICSEARCH_HOME/

# Switch to the elk account
su - elk

# Edit the elasticsearch configuration file
vi $ELASTICSEARCH_HOME/config/elasticsearch.yml

node.name: node2
network.host: node2

# ==================================================================node3

groupadd elk
useradd elk -g elk -p 123456
chown -R elk:elk $ELASTICSEARCH_HOME/

# Switch to the elk account
su - elk

# Edit the elasticsearch configuration file
vi $ELASTICSEARCH_HOME/config/elasticsearch.yml

node.name: node3
network.host: node3

su - root

shutdown -h now
# VM snapshot: before starting Elasticsearch

# Start & health-check Elasticsearch

# ==================================================================node1 node2 node3

su - elk

# (if $ELASTICSEARCH_HOME/logs does not exist yet, the redirection below will fail; create it first)
nohup $ELASTICSEARCH_HOME/bin/elasticsearch > $ELASTICSEARCH_HOME/logs/es.log 2>&1 &
# $ELASTICSEARCH_HOME/bin/elasticsearch

# List the cluster nodes
curl -XGET 'http://node1:9200/_cat/nodes?pretty'

# Check the cluster health status
curl http://node1:9200/_cluster/health

# List the parameters that can be monitored
curl http://node1:9200/_cat

# Cluster health summary
curl http://node1:9200/_cat/health
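# A minimal readiness loop (a sketch; assumes the 3-node cluster should reach green):
until curl -s 'http://node1:9200/_cluster/health?wait_for_status=green&timeout=10s' | grep -q '"status":"green"'; do
  echo "waiting for the cluster to go green..."
  sleep 5
done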

# Install cerebro, a web admin tool for elasticsearch
# https://github.com/lmenezes/cerebro/releases
# ==================================================================Install cerebro

su - root

tar -zxvf ~/cerebro-0.8.1.tgz -C /usr/local/
mv /usr/local/cerebro-0.8.1 /usr/local/cerebro

# Start
mkdir /usr/local/cerebro/logs
nohup /usr/local/cerebro/bin/cerebro > /usr/local/cerebro/logs/cerebro.log 2>&1 &
# /usr/local/cerebro/bin/cerebro &

# The default port is 9000
# http://node1:9000
# In the connect box, enter the address below
# http://node1:9200
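# If port 9000 conflicts with something else, cerebro (a Play application) accepts
# -D options on the command line; a sketch assuming port 9001 is free:
# /usr/local/cerebro/bin/cerebro -Dhttp.port=9001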

# Configure kibana
# ==================================================================node1

chown -R elk:elk $KIBANA_HOME/

# Switch to the elk account
su - elk

# mkdir $KIBANA_HOME/data
mkdir $KIBANA_HOME/logs

# Edit the configuration
vi $KIBANA_HOME/config/kibana.yml

# Add the following
server.port: 5601
server.host: "node1"
server.name: "kibana-master"
# Kibana 6.2 supports only a single elasticsearch.url (repeating the key is not valid YAML);
# point it at one node:
elasticsearch.url: "http://node1:9200"
# elasticsearch.url: "http://node2:9200"
# elasticsearch.url: "http://node3:9200"

# Start
su - elk
nohup $KIBANA_HOME/bin/kibana > $KIBANA_HOME/logs/kibana.log 2>&1 &
# $KIBANA_HOME/bin/kibana
# http://node1:5601
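# Once Kibana is up, its status API should respond:
curl http://node1:5601/api/status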

# Configure logstash
# ==================================================================node1

su - root

chown -R elk:elk $LOGSTASH_HOME/

# ll $LOGSTASH_HOME

# Switch to the elk account
su - elk

# Create the Logstash log directory
mkdir $LOGSTASH_HOME/logs

# Configure the data & log directories
vi $LOGSTASH_HOME/config/logstash.yml

# Add the following
path.data: /usr/local/logstash-6.2.4/data
path.logs: /usr/local/logstash-6.2.4/logs

su - root

scp -r $LOGSTASH_HOME node2:/usr/local/
scp -r $LOGSTASH_HOME node3:/usr/local/

# ==================================================================node2 node3

su - root

# Make the environment variables take effect
source /etc/profile

# Check the result
echo $LOGSTASH_HOME

chown -R elk:elk $LOGSTASH_HOME/

# Start
# ==================================================================node1 node2 node3

# Switch to the elk account
su - elk

# If elasticsearch is not already running
nohup $ELASTICSEARCH_HOME/bin/elasticsearch > $ELASTICSEARCH_HOME/logs/es.log 2>&1 &

# ==================================================================node1

nohup /usr/local/cerebro/bin/cerebro > /usr/local/cerebro/logs/cerebro.log 2>&1 &

nohup $KIBANA_HOME/bin/kibana > $KIBANA_HOME/logs/kibana.log 2>&1 &
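# A quick check that all the processes are up (a sketch):
ps -ef | grep -E 'elasticsearch|cerebro|kibana' | grep -v grep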

# ==================================================================node1

# Smoke test: read from stdin, write to stdout
$LOGSTASH_HOME/bin/logstash -e 'input { stdin { } } output { stdout {} }'
# type: hello

# The same test with the rubydebug codec for structured output
$LOGSTASH_HOME/bin/logstash -e 'input { stdin { } } output { stdout { codec => rubydebug } }'
# type: hi

su - root
chown -R elk:elk /var/log/messages
su - elk

# Ship /var/log/messages to Elasticsearch
vi $LOGSTASH_HOME/config/system.conf

input {
  file {
    path => "/var/log/messages"
    type => "system"
    start_position => "beginning"
  }
}
output {
  elasticsearch {
    hosts => ["node1:9200","node2:9200","node3:9200"]
    index => "system-%{+YYYY.MM.dd}"
  }
}

$LOGSTASH_HOME/bin/logstash -f $LOGSTASH_HOME/config/system.conf

su - root
chown -R elk:elk /var/log/secure
su - elk

# Add the secure log's path as a second input
vi $LOGSTASH_HOME/config/system_secure.conf
input {
  file {
    path => "/var/log/messages"
    type => "system"
    start_position => "beginning"
  }
  file {
    path => "/var/log/secure"
    type => "secure"
    start_position => "beginning"
  }
}
output {
  if [type] == "system" {
    elasticsearch {
      hosts => ["node1:9200","node2:9200","node3:9200"]
      index => "logs-system-%{+YYYY.MM.dd}"
    }
  }
  if [type] == "secure" {
    elasticsearch {
      hosts => ["node1:9200","node2:9200","node3:9200"]
      index => "logs-secure-%{+YYYY.MM.dd}"
    }
  }
}

$LOGSTASH_HOME/bin/logstash -f $LOGSTASH_HOME/config/system_secure.conf

# Collect the nginx access log, parse it with grok, and enrich it with GeoIP data
vi $LOGSTASH_HOME/config/nginx_log.conf

input {
  file {
    path => ["/usr/local/nginx/logs/access.log"]
    type => "nginx_log"
    start_position => "beginning"
  }
}
filter {
  grok {
    match => { "message" => "%{IPORHOST:http_host} %{IPORHOST:clientip} - %{USERNAME:remote_user} \[%{HTTPDATE:timestamp}\] \"(?:%{WORD:http_verb} %{NOTSPACE:http_request}(?: HTTP/%{NUMBER:http_version})?|%{DATA:raw_http_request})\" %{NUMBER:response} (?:%{NUMBER:bytes_read}|-) %{QS:referrer} %{QS:agent} %{QS:xforwardedfor} %{NUMBER:request_time:float}"}
  }
  geoip {
    source => "clientip"
  }
}
output {
  if [type] == "nginx_log" {
    elasticsearch {
      hosts => ["node1:9200","node2:9200","node3:9200"]
      index => "nginx_log-%{+YYYY.MM.dd}"
    }
  }
}

$LOGSTASH_HOME/bin/logstash -f $LOGSTASH_HOME/config/nginx_log.conf
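# Any of the pipeline files above can be syntax-checked without starting the
# pipeline, using -t (--config.test_and_exit):
$LOGSTASH_HOME/bin/logstash -f $LOGSTASH_HOME/config/nginx_log.conf -t

# And once a pipeline has run, verify that the indices are being created:
curl 'http://node1:9200/_cat/indices?v'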
