### Disable the firewall
service iptables stop
### Alias vi to vim (add the alias to ~/.bashrc to make it permanent)
alias vi=vim
vi ~/.bashrc
alias vi='vim'
yum -y install lrzsz*
Create the working directory
mkdir -p /opt/soft
 
Upload the following packages to /opt/soft:
172.25.0.77
jdk-8u101-linux-x64.rpm
kibana-5.0.0-x86_64.rpm
 
172.25.0.78, 172.25.0.79
elasticsearch-5.0.0.rpm
jdk-8u101-linux-x64.rpm
logstash-5.0.0.rpm
 
##### Install the JDK
rpm -ivh jdk-8u101-linux-x64.rpm
java -version
 
###### Install Kibana (172.25.0.77)
cd /opt/soft
rpm -ivh kibana-5.0.0-x86_64.rpm
 
###### Install Elasticsearch, Redis, and Logstash (172.25.0.78, 172.25.0.79)
cd /opt/soft
rpm -ivh elasticsearch-5.0.0.rpm
rpm -ivh logstash-5.0.0.rpm
Logstash init script:
/etc/init.d/logstash

#!/bin/sh
# Init script for logstash
# Maintained by Elasticsearch
# Generated by pleaserun.
# Implemented based on LSB Core 3.1:
# * Sections: 20.2, 20.3
#
### BEGIN INIT INFO
# Provides: logstash
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description:
# Description: Starts Logstash as a daemon.
### END INIT INFO

PATH=/sbin:/usr/sbin:/bin:/usr/bin
export PATH

if [ `id -u` -ne 0 ]; then
    echo "You need root privileges to run this script"
    exit 1
fi

name=logstash
pidfile="/var/run/$name.pid"

LS_USER=root
LS_GROUP=root
LS_HOME=/var/lib/logstash
LS_HEAP_SIZE="1000m"
LS_LOG_DIR=/var/log/logstash
LS_LOG_FILE="${LS_LOG_DIR}/$name.log"
LS_CONF_DIR=/etc/logstash/conf.d/
LS_OPEN_FILES=16384
LS_NICE=19
LS_OPTS=""

[ -r /etc/default/$name ] && . /etc/default/$name
[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name

program=/usr/share/logstash/bin/logstash
args="-f ${LS_CONF_DIR} -l ${LS_LOG_FILE} ${LS_OPTS}"

start() {

    LS_JAVA_OPTS="${LS_JAVA_OPTS} -Djava.io.tmpdir=${LS_HOME}"
    HOME=${LS_HOME}
    export PATH HOME LS_HEAP_SIZE LS_JAVA_OPTS LS_USE_GC_LOGGING

    # chown doesn't grab the supplemental groups when setting the user:group - so we have to do it for it.
    # Boy, I hope we're root here.
    SGROUPS=$(id -Gn "$LS_USER" | tr " " "," | sed 's/,$//'; echo '')

    if [ ! -z $SGROUPS ]
    then
        EXTRA_GROUPS="--groups $SGROUPS"
    fi

    # set ulimit as (root, presumably) first, before we drop privileges
    ulimit -n ${LS_OPEN_FILES}

    # Run the program!
    nice -n ${LS_NICE} chroot --userspec $LS_USER:$LS_GROUP $EXTRA_GROUPS / sh -c "
        cd $LS_HOME
        ulimit -n ${LS_OPEN_FILES}
        exec \"$program\" $args
    " > "${LS_LOG_DIR}/$name.stdout" 2> "${LS_LOG_DIR}/$name.err" &

    # Generate the pidfile from here. If we instead made the forked process
    # generate it there will be a race condition between the pidfile writing
    # and a process possibly asking for status.
    echo $! > $pidfile

    echo "$name started."
    return 0
}

stop() {
    # Try a few times to kill TERM the program
    if status ; then
        pid=`cat "$pidfile"`
        echo "Killing $name (pid $pid) with SIGTERM"
        kill -TERM $pid
        # Wait for it to exit.
        for i in 1 2 3 4 5 ; do
            echo "Waiting $name (pid $pid) to die..."
            status || break
            sleep 1
        done
        if status ; then
            if [ "$KILL_ON_STOP_TIMEOUT" -eq 1 ] ; then
                echo "Timeout reached. Killing $name (pid $pid) with SIGKILL. This may result in data loss."
                kill -KILL $pid
                echo "$name killed with SIGKILL."
            else
                echo "$name stop failed; still running."
            fi
        else
            echo "$name stopped."
        fi
    fi
}

status() {
    if [ -f "$pidfile" ] ; then
        pid=`cat "$pidfile"`
        if kill -0 $pid > /dev/null 2> /dev/null ; then
            # process by this pid is running.
            # It may not be our pid, but that's what you get with just pidfiles.
            # TODO(sissel): Check if this process seems to be the same as the one we
            # expect. It'd be nice to use flock here, but flock uses fork, not exec,
            # so it makes it quite awkward to use in this case.
            return 0
        else
            return 2 # program is dead but pid file exists
        fi
    else
        return 3 # program is not running
    fi
}

force_stop() {
    if status ; then
        stop
        status && kill -KILL `cat "$pidfile"`
    fi
}

case "$1" in
start)
status
code=$?
if [ $code -eq 0 ]; then
echo "$name is already running"
else
start
code=$?
fi
exit $code
;;
stop) stop ;;
force-stop) force_stop ;;
status)
status
code=$?
if [ $code -eq 0 ] ; then
echo "$name is running"
else
echo "$name is not running"
fi
exit $code
;;
restart)

stop && start
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|force-stop|status|restart}" >&2
exit 3
;;
esac

exit $?
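If the RPM did not already register the script, it can be hooked into SysV init in the usual way (a sketch; check what your package already set up before running this):
chmod +x /etc/init.d/logstash
chkconfig --add logstash
chkconfig logstash on
service logstash start    # start only once pipeline configs exist under /etc/logstash/conf.d/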

###### Install Redis
# yum -y install tcl gcc  // install build and test dependencies
# tar xf redis-3.2.3.tar.gz  // extract into the current directory
# cd redis-3.2.3  // enter the redis source directory
# make  // compile
# make test  // may fail; it is only a sanity check and can be ignored
# make install  // install
# mkdir /opt/redis/{db,conf} -pv  // create the redis installation directories
# cp redis.conf /opt/redis/conf/  // copy the config file into the redis installation directory
# cd src
# cp redis-benchmark redis-check-aof redis-cli redis-server mkreleasehdr.sh /opt/redis/  // copy the binaries into the redis installation directory
# ln -sv /opt/redis/redis-cli /usr/bin/  // symlink redis-cli into /usr/bin so `redis-cli` can be run from any directory
# vim /opt/redis/conf/redis.conf  // set `daemonize` to `yes` so the server runs as a background daemon; this step is optional, because the install script run below rewrites this file and sets the value to `yes`
daemonize yes
make install only installs the binaries; it does not set up an init script or default configuration for you. To use Redis in production, run the install_server.sh script that ships in the utils directory of the source tree.
# cd /opt/soft/redis-3.2.3/utils
# ./install_server.sh  // run the installer script
Welcome to the redis service installer
This script will help you easily set up a running redis server
 
Please select the redis port for this instance: [6379]
Selecting default: 6379
Please select the redis config file name [/etc/redis/6379.conf] /opt/redis/conf/redis.conf
Please select the redis log file name [/var/log/redis_6379.log]
Selected default - /var/log/redis_6379.log
Please select the data directory for this instance [/var/lib/redis/6379] /opt/redis/db/6379.db
Please select the redis executable path [/usr/local/bin/redis-server]          
Selected config:
Port           : 6379
Config file    : /opt/redis/conf/redis.conf
Log file       : /var/log/redis_6379.log
Data dir       : /opt/redis/db/6379.db
Executable     : /usr/local/bin/redis-server
Cli Executable : /usr/local/bin/redis-cli
Is this ok? Then press ENTER to go on or Ctrl-C to abort.
Copied /tmp/6379.conf => /etc/init.d/redis_6379
Installing service...
Successfully added to chkconfig!
Successfully added to runlevels 345!
Starting Redis server...
Installation successful!
# chkconfig --add redis_6379  // register redis as a system service
# chkconfig redis_6379 on  // enable start at boot
# vim /opt/redis/conf/redis.conf
requirepass Carsing2582#    // set a password (gotcha 1: skip this for now; if it is set, the redis connection tests later on will need to authenticate)
bind 0.0.0.0  // allow remote access by changing the bind line in redis.conf to 0.0.0.0 (accepts connections from any machine; done this way only for testing) (gotcha 2)
# /etc/init.d/redis_6379 restart
# ss -an | grep 6379          // the port is being listened on, so the service is up
LISTEN     0      128               127.0.0.1:6379                     *:*    
TIME-WAIT  0      0                 127.0.0.1:6379             127.0.0.1:34506
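A quick connectivity test (a minimal sketch; the -a option is only needed if requirepass was set above):
# redis-cli -h 172.25.0.78 -p 6379 ping            // expect PONG
# redis-cli -h 172.25.0.78 -p 6379 -a Carsing2582# ping   // variant with authentication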
 
### Edit the elasticsearch config file to define where the collected data is stored
cd /etc/elasticsearch/
vim elasticsearch.yml
cluster.name: ELK ### cluster name
node.name: node-1 ### node name
node.master: true
node.data: true
path.data: /ELK/data ### elasticsearch data path
path.logs: /ELK/logs ### elasticsearch log path
network.host: zmt-elk02 ### this host's own hostname
discovery.zen.ping.unicast.hosts: ["zmt-elk02", "zmt-elk03"] ### unicast discovery hosts
discovery.zen.minimum_master_nodes: 2
http.cors.enabled: true
http.cors.allow-origin: "*"
xpack.security.enabled: false
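The same file is edited on the second node (172.25.0.79); only the per-node values differ (a sketch, using the hostnames defined in /etc/hosts later in this document):
node.name: node-2 ### node name on 172.25.0.79
network.host: zmt-elk03 ### that host's own hostname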
 
Startup error: ERROR: bootstrap checks failed
system call filters failed to install; check the logs and fix your configuration or disable system call filters at your own risk
Cause: CentOS 6 does not support seccomp, while ES 5.x defaults bootstrap.system_call_filter to true and runs this check at startup, so the check fails and ES refuses to start. See https://github.com/elastic/elasticsearch/issues/22899
Fix: set bootstrap.system_call_filter to false in elasticsearch.yml, placed below the Memory settings:
bootstrap.memory_lock: false
bootstrap.system_call_filter: false
 
Thread pool settings
thread_pool:
  bulk:
    size: 5
    queue_size: 1000
  search:
    size: 5
    queue_size: 1000
  index:
    size: 5
    queue_size: 1000
 
#xpack.monitoring.enabled: false
#xpack.monitoring.exporters.my_local:
# type: local
 
indices.cache.filter.expire: 30d ### filter cache entries expire after 30 days
index.cache.filter: 30d
 
vim /etc/elasticsearch/jvm.options ### heap size; adjust to the memory available on the host
-Xms8g
-Xmx8g
 
vi /etc/security/limits.conf
elasticsearch soft memlock unlimited
elasticsearch hard memlock unlimited
elasticsearch soft nofile 65536
elasticsearch hard nofile 131072
elasticsearch soft nproc 2048
elasticsearch hard nproc 4096
* soft nofile 65536
* hard nofile 65536
* soft memlock unlimited
* hard memlock unlimited
 
 
 
 
Create the ELK data and log directories
mkdir -p /ELK/data
mkdir -p /ELK/logs
chown -R elasticsearch:elasticsearch /ELK
Start the service
service elasticsearch start
If log files appear under /ELK/logs after startup, the service is running normally.
Check ports 9200 and 9300:
9200 is the HTTP/REST API port that clients connect to
9300 is the internal transport port used for node-to-node communication
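A quick check that the node answers (a minimal sketch; adjust the IP to the node being tested):
curl 'http://172.25.0.78:9200/'          ### returns the node name, cluster_name and version as JSON
netstat -lntp | grep -E '9200|9300'      ### both ports should show LISTEN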
 
PS: error
max number of threads [1024] for user [lish] likely too low, increase to at least [2048]
Fix: as root, edit the config file under the limits.d directory.
vi /etc/security/limits.d/90-nproc.conf
Change the following:
* soft nproc 1024
# to
* soft nproc 2048
 
## Set the hostname and define local hostname resolution
vim /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=zmt-elk02
GATEWAY=172.25.0.254
 
vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.25.0.77 zmt-elk01
172.25.0.78 zmt-elk02
172.25.0.79 zmt-elk03
Note: reboot for the change to take effect
 
### Configure Kibana
cd /etc/kibana/
vim kibana.yml
server.port: 5601 ## Kibana listen port
server.host: "172.25.0.77" ## bind address of the Kibana server
elasticsearch.url: "http://172.25.0.78:9200" ## elasticsearch endpoint Kibana queries (Kibana 5.x accepts a single URL here)
#elasticsearch.url: "http://172.25.0.79:9200" ## a second elasticsearch.url line would simply override the first, so only one node can be configured
###### Start Kibana
service kibana start
Check that the port is listening
netstat -anp | grep 5601
tcp 0 0 172.25.0.77:5601 0.0.0.0:* LISTEN 8492/node
### Access the web UI at http://172.25.0.77:5601
 
#### Install the elasticsearch-analysis-ik-5.0.0.zip plugin (IK Chinese analyzer)
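No steps are recorded for this section; a minimal sketch, assuming the zip was uploaded to /opt/soft, is to unpack it into the ES plugins directory and restart the node:
mkdir -p /usr/share/elasticsearch/plugins/ik
unzip /opt/soft/elasticsearch-analysis-ik-5.0.0.zip -d /usr/share/elasticsearch/plugins/ik
chown -R elasticsearch:elasticsearch /usr/share/elasticsearch/plugins/ik
service elasticsearch restart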
 
#### Install the elasticsearch-head plugin
Elasticsearch 5.0 changed significantly, so head can no longer be installed directly as an ES site plugin for now; the head author provides an alternative way to run it as a standalone Node.js application.
### Install elasticsearch-head
cd /usr/share/elasticsearch
git clone git://github.com/mobz/elasticsearch-head.git
chmod -R 777 elasticsearch-head
cd elasticsearch-head/
PS: first fix the license field in package.json:
vim package.json
"license": "Apache-2.0",
 
npm install --force ### the npm command is available once Node.js is installed (see the Node installation below)
cd /usr/share/elasticsearch/elasticsearch-head/node_modules/grunt/bin
./grunt server > /dev/null 2>&1 &
 
### Install Node.js
node-v6.10.1-linux-x64.tar
Extract it
tar -xvf node-v6.10.1-linux-x64.tar
mv node-v6.10.1-linux-x64 /usr/share/elasticsearch/
vim /etc/profile
export NODE_HOME=/usr/share/elasticsearch/node-v6.10.1-linux-x64
export PATH=$PATH:$NODE_HOME/bin
source /etc/profile
cd /usr/share/elasticsearch/node-v6.10.1-linux-x64/bin/
./node -v
v6.10.1
npm -v
3.10.10
### Install grunt
######cd /usr/share/elasticsearch/elasticsearch-head
######npm install grunt-cli
###### npm install -g grunt --registry=https://registry.npm.taobao.org
grunt --version
#### Edit Gruntfile.js in the head directory; head listens on 127.0.0.1 by default
vim Gruntfile.js
connect: {
    server: {
        options: {
            port: 9100,
            hostname: '*', #### change this line
            base: '.',
            keepalive: true
        }
    }
}
cd _site/
vim app.js
this.base_uri = this.config.base_uri || this.prefs.get("app-base_uri") || "http://172.25.0.78:9200";
 
#### Configure elasticsearch to allow cross-origin (CORS) access
Edit the elasticsearch config file elasticsearch.yml
cd /etc/elasticsearch/
vim elasticsearch.yml
http.cors.enabled: true
http.cors.allow-origin: "*"
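Once elasticsearch has been restarted, the CORS setting can be verified from the command line (a sketch; the Origin value is just an example matching the head port):
curl -i -H 'Origin: http://172.25.0.77:9100' 'http://172.25.0.78:9200/'   ### the response headers should include Access-Control-Allow-Origin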
 
#### Restart elasticsearch and start the head server
service elasticsearch restart ### restart elasticsearch
cd /usr/share/elasticsearch/elasticsearch-head/ ### start the head (grunt) server with either of the following:
①#######node_modules/grunt/bin/grunt server > /dev/null 2>&1 &
 
②#######/usr/share/elasticsearch/elasticsearch-head/node_modules/grunt/bin/grunt server > /dev/null 2>&1 &
 
Check the cluster nodes
curl '172.25.0.78:9200/_cat/nodes?v'
ip           heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
172.25.0.78  13           33          0   0.01    0.01    0.00     mdi       -      node-1
172.25.0.79  10           33          0   0.00    0.02    0.00     mdi       *      node-2
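Cluster health can be checked against the same endpoint (a minimal sketch):
curl '172.25.0.78:9200/_cluster/health?pretty'   ### status should be green once both nodes have joined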
 
##### Install x-pack (the successor to marvel) for performance monitoring
Configure DNS (the plugin installer downloads over the internet)
vim /etc/resolv.conf
nameserver 210.73.88.1
 
Install the server-side plugin on the Kibana host
cd /usr/share/kibana/
bin/kibana-plugin install x-pack
 
Install the agent-side plugin on the ES hosts
cd /usr/share/elasticsearch/
bin/elasticsearch-plugin install x-pack
 
To uninstall:
bin/elasticsearch-plugin remove x-pack
 
PS:
Restart Kibana and ES after installation. Default credentials: elastic/changeme
 
Check the license expiry (the trial license is valid for one month by default)
curl -XGET -u elastic:1qaz2wsx 'http://172.25.0.78:9200/_xpack/license'
Replace the license (default password: changeme)
curl -XPUT -u elastic 'http://172.25.0.78:9200/_xpack/license?acknowledge=true' -H "Content-Type: application/json" -d @zhaishaomin-fa370f7b-6eb7-4730-9b73-89b3d93471e3-v5.json
 
Since this is only a basic license, security has to be disabled
vim /etc/kibana/kibana.yml
vim /etc/elasticsearch/elasticsearch.yml
xpack.security.enabled: false #### false disables security
 
#### Install logstash
rpm -ivh logstash-5.0.0.rpm
############################################################################
PS: the following error appeared during installation:
warning: rpmts_HdrFromFdno: Header V3 RSA/SHA256 Signature, key ID fd431d51: NOKEY 
Fix:
rpm --import /etc/pki/rpm-gpg/RPM*
############################################################################
cd /usr/share/logstash/
mkdir config
cp /etc/logstash/logstash.yml /usr/share/logstash/config/
cp /etc/logstash/log4j2.properties /usr/share/logstash/config/
######cd /etc/logstash/conf.d#######
vim input.conf
input {
    #stdin{}
    file {
        path => "/opt/soft/sub-test.cm-inv.com_access_80.log"
        start_position => "beginning" #### end = read from the end of the file, beginning = read from the start
        stat_interval => 1
        type => "access_log"
        tags => ["acc"]
    }
}

filter {
    grok {
        match => [ "message" , "\[%{HTTPDATE:time_local}\] \"%{IP:client_ip}\" \"%{WORD:verb} (%{NOTSPACE:request}|-) (HTTP/%{NUMBER:http_version}|-)\" \"(%{GREEDYDATA:http_cookie}|-)\" \"(%{WORD:http_x_forword_for}|-)\" (%{GREEDYDATA:nomean}|-) (?<user>[a-zA-Z._-]+|-)(?<status>[0-9._-]+|-) (?:%{NUMBER:body_bytes_sent:int}|-) (%{BASE16FLOAT:request_time:float}|-) \"(%{GREEDYDATA:http_did}|-)\" \"(%{GREEDYDATA:http_x_up_calling_line_id}|-)\" \"(%{NOTSPACE:http_referrer}|-)\" \"%{GREEDYDATA:http_user_agent}\" (%{BASE16FLOAT}|-)(%{NUMBER:content_length}|-)(%{GREEDYDATA:upstream_addr}|-)"]
        remove_field => [ "message","host","@version" ]
    }
    date {
        # the field name must match the grok capture above (time_local), otherwise this filter never fires
        match => ["time_local","dd/MMM/yyyy:HH:mm:ss Z"]
        target => "@timestamp"
    }
    geoip {
        source => "client_ip"
    }
}
###########
output {
    elasticsearch {
        hosts => "192.168.125.129:9200"
        index => "logstash-%{type}-%{+YYYY.MM.dd}"
    }
    stdout { codec => rubydebug }
}
################
/usr/share/logstash/bin/logstash -f input.conf
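Before starting the pipeline, the config can be syntax-checked first (a sketch using the Logstash 5.x test flag):
/usr/share/logstash/bin/logstash -f input.conf --config.test_and_exit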
 
 
bin/logstash -e 'input { stdin {} } output { stdout { codec => rubydebug } }'
 
 
Script to delete indices that are one month old
#!/bin/bash
# the date suffix must match the index naming used in the logstash output (%{+YYYY.MM.dd})
data=`date -d '-1 months' +%Y.%m.%d`
curl -XDELETE "http://172.25.0.77:9200/*-${data}"
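To run the cleanup automatically it can be scheduled in cron (a sketch; the script path /opt/soft/clean_index.sh is an assumption):
# crontab -e
30 1 * * * /bin/bash /opt/soft/clean_index.sh >> /var/log/clean_index.log 2>&1   ### runs daily at 01:30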
 
 
Reference: ELK series, part 10: problems encountered while deploying and running the ELK real-time log analysis stack (wang_zhenwei's CSDN blog): http://blog.csdn.net/wang_zhenwei/article/details/50372000
 
