hadoop生态搭建(3节点)-16.elk配置
# ==================================================================ELK环境准备
# 修改文件限制
# * 代表Linux所有用户名称,保存、退出、重新登录生效。
vi /etc/security/limits.conf
* soft nofile 65536
* hard nofile 65536
* soft nproc 2048
* hard nproc 4096
* soft memlock unlimited
* hard memlock unlimited
# 调整进程数
#调整成以下配置
vi /etc/security/limits.d/20-nproc.conf
* soft nproc 4096
root soft nproc unlimited
# 调整虚拟内存&最大并发连接
vi /etc/sysctl.conf
vm.max_map_count=655360
fs.file-max=655360
# 并执行命令生效:
sysctl -p
shutdown -h now
# 快照 elk前
# ==================================================================安装 elasticsearch
tar -zxvf ~/elasticsearch-6.2.4.tar.gz -C /usr/local
rm -r ~/elasticsearch-6.2.4.tar.gz
# ==================================================================安装 logstash
tar -zxvf ~/logstash-6.2.4.tar.gz -C /usr/local
rm -r ~/logstash-6.2.4.tar.gz
# ==================================================================安装 kibana
tar -zvxf ~/kibana-6.2.4-linux-x86_64.tar.gz -C /usr/local
mv /usr/local/kibana-6.2.4-linux-x86_64 /usr/local/kibana-6.2.4
rm -r ~/kibana-6.2.4-linux-x86_64.tar.gz
# 环境变量
# ==================================================================node1 node2 node3
vi /etc/profile
# 在export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL下添加
export JAVA_HOME=/usr/java/jdk1.8.0_111
export JRE_HOME=/usr/java/jdk1.8.0_111/jre
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export SCALA_HOME=/usr/local/scala-2.12.4
export KAFKA_HOME=/usr/local/kafka_2.12-0.10.2.1
export FLUME_HOME=/usr/local/flume-1.8.0
export SPARK_HOME=/usr/local/spark-2.3.0
export STORM_HOME=/usr/local/storm-1.1.0
export REDIS_HOME=/usr/local/redis-4.0.2
export ERLANG_HOME=/usr/local/erlang
export RABBITMQ_HOME=/usr/local/rabbitmq_server-3.7.5
export MONGODB_HOME=/usr/local/mongodb-3.4.5
export NGINX_HOME=/usr/local/nginx
export CATALINA_BASE=/usr/local/tomcat
export CATALINA_HOME=/usr/local/tomcat
export TOMCAT_HOME=/usr/local/tomcat
export KEEPALIVED_HOME=/usr/local/keepalived
export ELASTICSEARCH_HOME=/usr/local/elasticsearch-6.2.4
export LOGSTASH_HOME=/usr/local/logstash-6.2.4
export KIBANA_HOME=/usr/local/kibana-6.2.4
export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin:$FLUME_HOME/bin:$SPARK_HOME/bin:$STORM_HOME/bin:$REDIS_HOME/bin:$ERLANG_HOME/bin:$RABBITMQ_HOME/ebin:$RABBITMQ_HOME/sbin:$MONGODB_HOME/bin:$NGINX_HOME/sbin:$CATALINA_HOME/bin:$KEEPALIVED_HOME/sbin:$ELASTICSEARCH_HOME/bin:$LOGSTASH_HOME/bin:$KIBANA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
# ==================================================================node1
# 使环境变量生效
source /etc/profile
# 查看配置结果
echo $ELASTICSEARCH_HOME
echo $LOGSTASH_HOME
echo $KIBANA_HOME
# 由于Elasticsearch、Logstash、Kibana均不能以root账号运行
# 但是Linux对非root账号可并发操作的文件、线程都有限制
# useradd elk (用户名) -g elk (组名) -p 123456 (密码)
groupadd elk
useradd elk -g elk -p 123456
chown -R elk:elk $ELASTICSEARCH_HOME/
# 账号切换到 elk
su - elk
# 创建Elasticsearch主目录、数据目录、日志目录
mkdir $ELASTICSEARCH_HOME/data
mkdir $ELASTICSEARCH_HOME/logs  # 注意:后面 nohup 启动时将 es.log 重定向到该目录,必须先创建
# chown -R elk:elk $ELASTICSEARCH_HOME/data
# chown -R elk:elk $ELASTICSEARCH_HOME/logs
# ll $ELASTICSEARCH_HOME
# 修改elasticsearch配置文件
vi $ELASTICSEARCH_HOME/config/elasticsearch.yml
# 集群名
cluster.name: es_cluster
# 节点名
node.name: node1
# 节点host/ip
network.host: node1
http.port: 9200
# TCP传输端口
transport.tcp.port: 9300
# 数据保存目录
path.data: /usr/local/elasticsearch-6.2.4/data
# 日志保存目录
path.logs: /usr/local/elasticsearch-6.2.4/logs
# 是否允许作为主节点
node.master: true
# 是否保存数据
node.data: true
# 集群广播
# discovery.zen.ping.multicast.enabled: true
# 集群中的主节点的初始列表,当节点(主节点或者数据节点)启动时使用这个列表进行探测
discovery.zen.ping.unicast.hosts: ["node1","node2","node3"]
# 主节点个数
discovery.zen.minimum_master_nodes: 2
#避免出现跨域问题
http.cors.enabled: true
http.cors.allow-origin: "*"
su root
scp -r $ELASTICSEARCH_HOME node2:/usr/local/
scp -r $ELASTICSEARCH_HOME node3:/usr/local/
# ==================================================================node2 node3
# 使环境变量生效
source /etc/profile
# 查看配置结果
echo $ELASTICSEARCH_HOME
# ==================================================================node2
groupadd elk
useradd elk -g elk -p 123456
chown -R elk:elk $ELASTICSEARCH_HOME/
# 账号切换到 elk
su - elk
# 修改elasticsearch配置文件
vi $ELASTICSEARCH_HOME/config/elasticsearch.yml
node.name: node2
network.host: node2
# ==================================================================node3
groupadd elk
useradd elk -g elk -p 123456
chown -R elk:elk $ELASTICSEARCH_HOME/
# 账号切换到 elk
su - elk
# 修改elasticsearch配置文件
vi $ELASTICSEARCH_HOME/config/elasticsearch.yml
node.name: node3
network.host: node3
su - root
shutdown -h now
# 快照 Elasticsearch启动前
# 启动&健康检查 Elasticsearch
# ==================================================================node1 node2 node3
su - elk
nohup $ELASTICSEARCH_HOME/bin/elasticsearch > $ELASTICSEARCH_HOME/logs/es.log 2>&1 &
# $ELASTICSEARCH_HOME/bin/elasticsearch
# 查看集群节点
curl -XGET 'http://node1:9200/_cat/nodes?pretty'
# 查看健康状态
curl http://node1:9200/_cluster/health
# 查看可以监测的参数
curl http://node1:9200/_cat
# 查看集群健康信息
curl http://node1:9200/_cat/health
# 安装 elasticsearch 插件 cerebro
# https://github.com/lmenezes/cerebro/releases
# ==================================================================cerebro 安装
su - root
# .tgz 为 gzip 压缩包,需用 -z 解压(原文的 -xivf 有误)
tar -zxvf ~/cerebro-0.8.1.tgz -C /usr/local/
mv /usr/local/cerebro-0.8.1 /usr/local/cerebro
# 启动
mkdir /usr/local/cerebro/logs
nohup /usr/local/cerebro/bin/cerebro > /usr/local/cerebro/logs/cerebro.log 2>&1 &
# /usr/local/cerebro/bin/cerebro &
# 默认端口为9000
# http://node1:9000
# 在文本框中输入下面的地址
# http://node1:9200
# 配置kibana
# ==================================================================node1
chown -R elk:elk $KIBANA_HOME/
# 账号切换到 elk
su - elk
# mkdir $KIBANA_HOME/data
mkdir $KIBANA_HOME/logs
# 修改配置
vi $KIBANA_HOME/config/kibana.yml
#增加以下内容
server.port: 5601
server.host: "node1"
server.name: "kibana-master"
elasticsearch.url: "http://node1:9200"
# 注意:Kibana 6.x 的 elasticsearch.url 只能配置一个地址,重复写多行时仅最后一行生效
# elasticsearch.url: "http://node2:9200"
# elasticsearch.url: "http://node3:9200"
# 启动
su - elk
nohup $KIBANA_HOME/bin/kibana > $KIBANA_HOME/logs/kibana.log 2>&1 &
# $KIBANA_HOME/bin/kibana
# http://node1:5601
# 配置logstash
# ==================================================================node1
su - root
chown -R elk:elk $LOGSTASH_HOME/
# ll $LOGSTASH_HOME
# 账号切换到 elk
su - elk
# 创建Logstash主目录、配置目录、数据目录、日志目录
mkdir $LOGSTASH_HOME/logs
# 配置数据&日志目录
vi $LOGSTASH_HOME/config/logstash.yml
# 增加以下内容
path.data: /usr/local/logstash-6.2.4/data
path.logs: /usr/local/logstash-6.2.4/logs
su - root
scp -r $LOGSTASH_HOME node2:/usr/local/
scp -r $LOGSTASH_HOME node3:/usr/local/
# ==================================================================node2 node3
su - root
# 使环境变量生效
source /etc/profile
# 查看配置结果
echo $LOGSTASH_HOME
chown -R elk:elk $LOGSTASH_HOME/
# 启动
# ==================================================================node1 node2 node3
# 账号切换到 elk
su - elk
# 如果没有启动
nohup $ELASTICSEARCH_HOME/bin/elasticsearch > $ELASTICSEARCH_HOME/logs/es.log 2>&1 &
# ==================================================================node1
nohup /usr/local/cerebro/bin/cerebro > /usr/local/cerebro/logs/cerebro.log 2>&1 &
nohup $KIBANA_HOME/bin/kibana > $KIBANA_HOME/logs/kibana.log 2>&1 &
# ==================================================================node1
$LOGSTASH_HOME/bin/logstash -e 'input { stdin { } } output { stdout {} }'
# 在提示符下输入 hello 测试回显
$LOGSTASH_HOME/bin/logstash -e 'input { stdin { } } output { stdout {codec => rubydebug} }'
# 在提示符下输入 hi 测试回显
su - root
chown -R elk:elk /var/log/messages
su - elk
vi $LOGSTASH_HOME/config/system.conf
input {
file {
path => "/var/log/messages"
type => "system"
start_position => "beginning"
}
}
output {
elasticsearch {
hosts => ["node1:9200","node2:9200","node3:9200"]
index => "system-%{+YYYY.MM.dd}"
}
}
$LOGSTASH_HOME/bin/logstash -f $LOGSTASH_HOME/config/system.conf
su - root
chown -R elk:elk /var/log/secure
su - elk
vi $LOGSTASH_HOME/config/system_secure.conf
# 添加secure日志的路径
input {
file {
path => "/var/log/messages"
type => "system"
start_position => "beginning"
  }
  file {
path => "/var/log/secure"
type => "secure"
start_position => "beginning"
}
}
output {
if [type] == "system" {
elasticsearch {
hosts => ["node1:9200","node2:9200","node3:9200"]
index => "logs-system-%{+YYYY.MM.dd}"
}
  }
  if [type] == "secure" {
elasticsearch {
hosts => ["node1:9200","node2:9200","node3:9200"]
index => "logs-secure-%{+YYYY.MM.dd}"
}
}
}
$LOGSTASH_HOME/bin/logstash -f $LOGSTASH_HOME/config/system_secure.conf
vi $LOGSTASH_HOME/config/nginx_log.conf
input {
file {
path => ["/usr/local/nginx/logs/access.log"]
type => "nginx_log"
start_position => "beginning"
}
}
filter {
grok {
match => { "message" => "%{IPORHOST:http_host} %{IPORHOST:clientip} - %{USERNAME:remote_user} \[%{HTTPDATE:timestamp}\] \"(?:%{WORD:http_verb} %{NOTSPACE:http_request}(?: HTTP/%{NUMBER:http_version})?|%{DATA:raw_http_request})\" %{NUMBER:response} (?:%{NUMBER:bytes_read}|-) %{QS:referrer} %{QS:agent} %{QS:xforwardedfor} %{NUMBER:request_time:float}"}
}
geoip {
source => "clientip"
}
}
output {
if [type] == "nginx_log" {
elasticsearch {
hosts => ["node1:9200","node2:9200","node3:9200"]
index => "nginx_log-%{+YYYY.MM.dd}"
}
}
}
$LOGSTASH_HOME/bin/logstash -f $LOGSTASH_HOME/config/nginx_log.conf
hadoop生态搭建(3节点)-16.elk配置的更多相关文章
- hadoop生态搭建(3节点)
软件:CentOS-7 VMware12 SSHSecureShellClient shell工具:Xshell 规划 vm网络配置 01.基础配置 02.ssh配置 03.zookeep ...
- hadoop生态搭建(3节点)-08.kafka配置
如果之前没有安装jdk和zookeeper,安装了的请直接跳过 # https://www.oracle.com/technetwork/java/javase/downloads/java-arch ...
- hadoop生态搭建(3节点)-13.mongodb配置
# 13.mongodb配置_副本集_认证授权# ==================================================================安装 mongod ...
- hadoop生态搭建(3节点)-04.hadoop配置
如果之前没有安装jdk和zookeeper,安装了的请直接跳过 # https://www.oracle.com/technetwork/java/javase/downloads/java-arch ...
- hadoop生态搭建(3节点)-10.spark配置
# https://www.scala-lang.org/download/2.12.4.html# ================================================= ...
- hadoop生态搭建(3节点)-15.Nginx_Keepalived_Tomcat配置
# Nginx+Tomcat搭建高可用服务器名称 预装软件 IP地址Nginx服务器 Nginx1 192.168.6.131Nginx服务器 Nginx2 192.168.6.132 # ===== ...
- hadoop生态搭建(3节点)-03.zookeeper配置
# https://www.oracle.com/technetwork/java/javase/downloads/java-archive-javase8-2177648.html # ===== ...
- hadoop生态搭建(3节点)-09.flume配置
# http://archive.apache.org/dist/flume/1.8.0/# ===================================================== ...
- hadoop生态搭建(3节点)-12.rabbitmq配置
# 安装 需要相关包# ==================================================================node1 node2 node3 yum ...
随机推荐
- 属性锁TimeLock
属性锁TimeLock 这是本人所写的几个类,用于对属性设置值时进行锁定与解锁,适用于高逼格动画当中. 源码: TimeLock.h 与 TimeLock.m // // TimeLock.h // ...
- Oracle 12C pluggable database自启动
实验环境创建了两个PDB,本实验实现在开启数据库时,实现pluggable database PDB2自启动: 原始环境: SQL> shu immediateDatabase closed.D ...
- TP5:隐藏inde.php文件
原文地址:https://www.cnblogs.com/wangjiayi/p/7943268.html 一,找到/public/.htaccess文件,如果你的入口文件已经移动到根目录下,那么你的 ...
- 一个asp.net小项目总结
写这篇文章之前先吐槽一下,最近换了一个公司,是给一个国企做外包,有两个月了,感觉这里的气氛有点不爽,还有点怀念以前的公司.具体听我说来,这里有几个团队,.net,java,手机开发,.net只有6个人 ...
- 基于IDEA的JavaWeb开发环境搭建
基于IDEA的JavaWeb开发环境搭建 基于IDEA的JavaWeb开发环境搭建 jdk下载安装配置环境变量 下载 安装 配置环境变量 下载安装激活使用IntelliJ IDEA 下载 安装 激活 ...
- PhoneGap API 之多媒体
一. MediaApi 简单介绍 PhoneGap API Media 对象提供录制和回放设备上的音频文件的能力 参数: var media = new Media(src, mediaSuccess ...
- PyCharm的Debug工具栏中的Watches
In the Watches pane you can evaluate any number of variables or expressions in the context of the cu ...
- Django logging的介绍
Django用的是Python buildin的logging模块. Python logging由四部分组成: Loggers - 记录器 Handles - 处理器 Filters - 过滤器 F ...
- [19/04/04-星期四] IO技术_CommonsIO(通用IO,别人造的轮子,FileUtils类 操作文件 & IOUtilsl类 操作里边的内容 )
一.概念 JDK中提供的文件操作相关的类,但是功能都非常基础,进行复杂操作时需要做大量编程工作.实际开发中,往往需要 你自己动手编写相关的代码,尤其在遍历目录文件时,经常用到递归,非常繁琐. Apac ...
- 如何指定安装webpack
在此我要推荐webpack简易学习教程:https://www.runoob.com/w3cnote/webpack-tutorial.html 大家可以参考这个菜鸟教程,但是这个菜鸟教程有其局限性, ...