#!/bin/sh
# Init script for logstash
# Maintained by Elasticsearch
# Generated by pleaserun.
# Implemented based on LSB Core 3.1:
# * Sections: 20.2, 20.3
#
### BEGIN INIT INFO
# Provides: logstash
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description:
# Description: Starts Logstash as a daemon.
### END INIT INFO

PATH=/sbin:/usr/sbin:/bin:/usr/bin
export PATH

# Root is required: start() chroots, changes user/group and raises ulimits.
if [ "$(id -u)" -ne 0 ]; then
  echo "You need root privileges to run this script" >&2
  exit 1
fi

name=logstash
pidfile="/var/run/$name.pid"

# Defaults; any of these may be overridden by /etc/default or /etc/sysconfig.
LS_USER=root
LS_GROUP=root
LS_HOME=/var/lib/logstash
LS_HEAP_SIZE="1000m"
LS_LOG_DIR=/var/log/logstash
LS_LOG_FILE="${LS_LOG_DIR}/$name.log"
LS_CONF_DIR=/etc/logstash/conf.d/
LS_OPEN_FILES=16384
LS_NICE=19
LS_OPTS=""

# Source distro-specific override files if present (Debian, then RHEL style).
[ -r /etc/default/$name ] && . /etc/default/$name
[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name

program=/usr/share/logstash/bin/logstash
args="-f ${LS_CONF_DIR} -l ${LS_LOG_FILE} ${LS_OPTS}"
# Start logstash as a background daemon: drop privileges to
# $LS_USER:$LS_GROUP via chroot --userspec, redirect stdout/stderr to the
# log dir, and record the child's PID in $pidfile.
start() {
  # Point Java's temp dir at LS_HOME so the unprivileged user can write there.
  LS_JAVA_OPTS="${LS_JAVA_OPTS} -Djava.io.tmpdir=${LS_HOME}"
  HOME=${LS_HOME}
  export PATH HOME LS_HEAP_SIZE LS_JAVA_OPTS LS_USE_GC_LOGGING

  # chroot --userspec doesn't pick up supplemental groups when setting
  # user:group, so we have to resolve and pass them ourselves.
  # (We are root here; the check at the top of the script guarantees it.)
  SGROUPS=$(id -Gn "$LS_USER" | tr ' ' ',' | sed 's/,$//')
  EXTRA_GROUPS=""
  if [ -n "$SGROUPS" ]; then
    EXTRA_GROUPS="--groups $SGROUPS"
  fi

  # Raise the open-files limit while still root, before dropping privileges.
  ulimit -n "${LS_OPEN_FILES}"

  # Run the program! $EXTRA_GROUPS is intentionally unquoted: it must split
  # into "--groups <list>" (or into nothing when empty).
  nice -n "${LS_NICE}" chroot --userspec "$LS_USER:$LS_GROUP" $EXTRA_GROUPS / sh -c "
    cd $LS_HOME
    ulimit -n ${LS_OPEN_FILES}
    exec \"$program\" $args
  " > "${LS_LOG_DIR}/$name.stdout" 2> "${LS_LOG_DIR}/$name.err" &

  # Generate the pidfile from here. If we instead made the forked process
  # generate it there will be a race condition between the pidfile writing
  # and a process possibly asking for status.
  echo $! > "$pidfile"
  echo "$name started."
  return 0
}
# Stop the daemon: send SIGTERM, wait up to 5 seconds, then either SIGKILL
# (when KILL_ON_STOP_TIMEOUT=1) or report failure.
stop() {
  # Try a few times to kill TERM the program
  if status ; then
    pid=$(cat "$pidfile")
    echo "Killing $name (pid $pid) with SIGTERM"
    kill -TERM "$pid"
    # Wait for it to exit.
    for i in 1 2 3 4 5 ; do
      echo "Waiting $name (pid $pid) to die..."
      status || break
      sleep 1
    done
    if status ; then
      # Default to 0 so an unset KILL_ON_STOP_TIMEOUT doesn't make the
      # numeric test error out ("integer expression expected").
      if [ "${KILL_ON_STOP_TIMEOUT:-0}" -eq 1 ] ; then
        echo "Timeout reached. Killing $name (pid $pid) with SIGKILL. This may result in data loss."
        kill -KILL "$pid"
        echo "$name killed with SIGKILL."
      else
        echo "$name stop failed; still running."
      fi
    else
      echo "$name stopped."
    fi
  fi
}
# LSB status: 0 = running, 2 = dead but pidfile exists, 3 = not running.
status() {
  # No pidfile at all: the program was never started (or cleaned up).
  if [ ! -f "$pidfile" ] ; then
    return 3 # program is not running
  fi
  pid=$(cat "$pidfile")
  if kill -0 "$pid" > /dev/null 2>&1 ; then
    # A process with this pid is running.
    # It may not be our pid, but that's what you get with just pidfiles.
    # TODO(sissel): Check if this process seems to be the same as the one we
    # expect. It'd be nice to use flock here, but flock uses fork, not exec,
    # so it makes it quite awkward to use in this case.
    return 0
  fi
  return 2 # program is dead but pid file exists
}
# Try a graceful stop() first; if the process survives it, SIGKILL it.
force_stop() {
  if status ; then
    stop
    status && kill -KILL "$(cat "$pidfile")"
  fi
}
# Dispatch on the action argument (LSB 20.2 standard actions).
case "$1" in
  start)
    status
    code=$?
    if [ $code -eq 0 ]; then
      echo "$name is already running"
    else
      start
      code=$?
    fi
    exit $code
    ;;
  stop) stop ;;
  force-stop) force_stop ;;
  status)
    status
    code=$?
    if [ $code -eq 0 ] ; then
      echo "$name is running"
    else
      echo "$name is not running"
    fi
    exit $code
    ;;
  restart)
    stop && start
    ;;
  *)
    # $SCRIPTNAME was never defined anywhere in this script, so the usage
    # line printed an empty name; derive it from $0 instead.
    echo "Usage: ${0##*/} {start|stop|force-stop|status|restart}" >&2
    exit 3
    ;;
esac
exit $?
######安装redis
# yum -y install tcl gcc //安装测试依赖工具
# tar xf redis-3.2.3.tar.gz //解压至当前目录
# cd redis-3.2.3 //切换至redis源码目录
# make //编译
# make test //有可能会失败,只是看看,不用在意
# make install //安装
# mkdir /opt/redis/{db,conf} -pv //创建redis安装目录
# cp redis.conf /opt/redis/conf/ //复制配置文件至redis安装目录
# cd src
# cp redis-benchmark redis-check-aof redis-cli redis-server mkreleasehdr.sh /opt/redis/ //复制各文件至redis安装目录
# ln -sv /opt/redis/redis-cli /usr/bin/ //将redis-cli可执行文件链接至/usr/bin目录,方便在任何目录下都可直接执行`redis-cli`
# vim /opt/redis/conf/redis.conf //修改redis.conf 中的 `daemonize`为`yes`,让server以守护进程在后台执行,这一步可以不做,因为后面要执行的脚本会自动创建这个文件,且这个值会设置为`yes`
daemonize yes
make install仅仅在你的系统上安装了二进制文件,不会替你默认配置init脚本和配置文件,为了把它用在生产环境而安装它,在源码目录的utils目录下Redis为系统提供了
这样的一个脚本install_server.sh
# cd /opt/soft/redis-3.2.3/utils
# ./install_server.sh //执行sh格式的安装脚本
Welcome to the redis service installer
This script will help you easily set up a running redis server
Please select the redis port for this instance: [6379]
Selecting default: 6379
Please select the redis config file name [/etc/redis/6379.conf] /opt/redis/conf/redis.conf
Please select the redis log file name [/var/log/redis_6379.log]
Selected default - /var/log/redis_6379.log
Please select the data directory for this instance [/var/lib/redis/6379] /opt/redis/db/6379.db
Please select the redis executable path [/usr/local/bin/redis-server]
Selected config:
Port : 6379
Config file : /opt/redis/conf/redis.conf
Log file : /var/log/redis_6379.log
Data dir : /opt/redis/db/6379.db
Executable : /usr/local/bin/redis-server
Cli Executable : /usr/local/bin/redis-cli
Is this ok? Then press ENTER to go on or Ctrl-C to abort.
Copied /tmp/6379.conf => /etc/init.d/redis_6379
Installing service...
Successfully added to chkconfig!
Successfully added to runlevels 345!
Starting Redis server...
Installation successful!
# chkconfig --add redis_6379 //将redis加入系统服务
# chkconfig redis_6379 on //加入开机启动
# vim /opt/redis/conf/redis.conf
requirepass Carsing2582# //设置密码【坑1】:这步先不做,如果做了后面的测试连接redis的时候就需要认证)
bind 0.0.0.0 //redis要打开远程访问允许,在 redis.conf 中 bind 这一行改成 0.0.0.0(运行任何机器连接,为了测试先这样)【此为坑2】
# /etc/init.d/redis_6379 restart
# ss -an| grep 6379 //端口已经被监听,证明服务已提供
LISTEN 0 128 127.0.0.1:6379 *:*
TIME-WAIT 0 0 127.0.0.1:6379 127.0.0.1:34506
###修改 elasticsearch配置文件 存放所收集到的日志
cd /etc/elasticsearch/
vim elasticsearch.yml
cluster.name: ELK ###集群名称
node.name: node-1 ###节点编号
node.master: true
node.data: true
path.data: /ELK/data ###elasticsearch数据路径
path.logs: /ELK/logs ###elasticsearch日志路径
network.host: zmt-elk02 ###本机主机名
discovery.zen.ping.unicast.hosts: ["zmt-elk02", "zmt-elk03"] ###自动发现节点主机名
discovery.zen.minimum_master_nodes: 2
http.cors.enabled: true
http.cors.allow-origin: "*"
xpack.security.enabled: false
启动异常:ERROR: bootstrap checks failed
system call filters failed to install; check the logs and fix your configuration or disable system call filters at your own risk
问题原因:因为Centos6不支持SecComp,而ES5.2.1默认bootstrap.system_call_filter为true进行检测,所以导致检测失败,失败后直接导致ES不能启动。详见 :https://github.com/elastic/elasticsearch/issues/22899
解决方法:在elasticsearch.yml中配置bootstrap.system_call_filter为false,注意要在Memory下面:
bootstrap.memory_lock: false
bootstrap.system_call_filter: false
设置线程池
thread_pool:
bulk:
size: 5
queue_size: 1000
search:
size: 5
queue_size: 1000
index:
size: 5
queue_size: 1000
#xpack.monitoring.enabled: false
#xpack.monitoring.exporters.my_local:
# type: local
indices.cache.filter.expire: 30d ###设置索引30天过期
index.cache.filter: 30d
vim /etc/elasticsearch/jvm.options ###内存酌情分配
-Xms8g
-Xmx8g
vi /etc/security/limits.conf
elasticsearch soft memlock unlimited
elasticsearch hard memlock unlimited
elasticsearch soft nofile 65536
elasticsearch hard nofile 131072
elasticsearch soft nproc 2048
elasticsearch hard nproc 4096
* soft nofile 65536
* hard nofile 65536
* soft memlock unlimited
* hard memlock unlimited
创建ELK数据、日志存放路径
mkdir /ELK/data
mkdir /ELK/logs
chown -R elasticsearch:elasticsearch /ELK
启动服务
service elasticsearch start
启动服务之后查看/ELK/logs路径中有日志,说明服务正常
查看端口9200、9300
9200端口是API 用来连接服务的接口
9300是内部服务
PS:报错
max number of threads [1024] for user [lish] likely too low, increase to at least [2048]
解决:切换到root用户,进入limits.d目录下修改配置文件。
vi /etc/security/limits.d/90-nproc.conf
修改如下内容:
* soft nproc 1024
#修改为
* soft nproc 2048
##修改主机名,定义dns
vim /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=zmt-elk02
GATEWAY=172.25.0.254
vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.25.0.77 zmt-elk01
172.25.0.78 zmt-elk02
172.25.0.79 zmt-elk03
注:重启生效 reboot
###配置kibana
cd /etc/kibana/
vim kibana.yml
server.port: 5601 ##定义kibana端口
server.host: "172.25.0.77" ##服务主机IP
elasticsearch.url: "http://172.25.0.78:9200" ##url调取elasticsearch
elasticsearch.url: "http://172.25.0.79:9200" ##url调取elasticsearch（注意：kibana 5.x 只支持配置一个 elasticsearch.url，重复配置时后一行会覆盖前一行，只保留一行即可）
######启动kibana
service kibana start
查看端口是否存在
netstat -anp | grep 5601
tcp 0 0 172.25.0.77:5601 0.0.0.0:* LISTEN 8492/node
###web页面上访问
####安装elasticsearch-analysis-ik-5.0.0.zip 分词器
####安装elasticsearch-head插件
由于elasticsearch5.0版本变化较大,目前elasticsearch5.0 暂时不支持直接安装,但是head作者提供了另一种安装方法
###安装elasticsearch-head
cd /usr/share/elasticsearch
git clone git://github.com/mobz/elasticsearch-head.git
chmod -R 777 elasticsearch-head
cd elasticsearch-head/
PS:先修改license
vim package.json
"license": "Apache-2.0",
npm install --force ###安装完node就会有npm命令
cd /usr/share/elasticsearch/elasticsearch-head/node_modules/grunt/bin
./grunt server > /dev/null 2>&1 &
###安装node
node-v6.10.1-linux-x64.tar
解压
tar -xvf node-v6.10.1-linux-x64.tar
mv node-v6.10.1-linux-x64 /usr/share/elasticsearch/
vim /etc/profile
export NODE_HOME=/usr/share/elasticsearch/node-v6.10.1-linux-x64
export PATH=$PATH:$NODE_HOME/bin
source /etc/profile
cd /usr/share/elasticsearch/node-v6.10.1-linux-x64/bin/
./node -v
v6.10.1
npm -v
3.10.10
###安装grunt
######cd /usr/share/elasticsearch/elasticsearch-head
######npm install grunt-cli
grunt --version
#### 修改head目录下的Gruntfile.js配置,head默认监听127.0.0.1
vim Gruntfile.js
hostname: '*',
connect: {
server: {
options: {
port: 9100,
hostname: '*', ####修改此处
base: '.',
keepalive: true
}
}
cd _site/
vim app.js
this.base_uri = this.config.base_uri || this.prefs.get("app-base_uri") || "http://172.25.0.78:9200";
####elasticsearch配置允许跨域访问
修改elasticsearch配置文件elasticsearch.yml
cd /etc/elasticsearch/
vim elasticsearch.yml
http.cors.enabled: true
http.cors.allow-origin: "*"
####重启elasticsearch,并启动node
service elasticsearch restart ###启动elasticsearch
cd /usr/share/elasticsearch/elasticsearch-head/ ###启动node
①#######node_modules/grunt/bin/grunt server > /dev/null 2>&1 &
②#######/usr/share/elasticsearch/elasticsearch-head/node_modules/grunt/bin/grunt server > /dev/null 2>&1 &
查看集群信息
curl '172.25.0.78:9200/_cat/nodes?v'
ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
172.25.0.78 13 33 0 0.01 0.01 0.00 mdi - node-1
172.25.0.79 10 33 0 0.00 0.02 0.00 mdi * node-2
#####安装x-pack(marvel) 进行性能监控
配置dns
vim /etc/resolv.conf
nameserver 210.73.88.1
kibana上安装server端
cd /usr/share/kibana/
bin/kibana-plugin install x-pack
es上安装agent端
cd /usr/share/elasticsearch/
bin/elasticsearch-plugin install x-pack
卸载
bin/elasticsearch-plugin remove x-pack
PS:
安装完需要重启kibana和es 默认用户名密码elastic/changeme
查看license时间 默认一个月
curl -XGET -u elastic:1qaz2wsx 'http://172.25.0.78:9200/_xpack/license'
替换license 默认密码changeme
curl -XPUT -u elastic 'http://172.25.0.78:9200/_xpack/license?acknowledge=true' -H "Content-Type: application/json" -d @zhaishaomin-fa370f7b-6eb7-4730-9b73-89b3d93471e3-v5.json
由于是基本的license,所以需要禁用安全性
vim /etc/kibana/kibana.yml
vim /etc/elasticsearch/elasticsearch.yml
xpack.security.enabled: false ####false为禁用
####安装logstash
rpm -ivh logstash-5.0.0.rpm
############################################################################
PS:安装出现一下错误:
warning: rpmts_HdrFromFdno: Header V3 RSA/SHA256 Signature, key ID fd431d51: NOKEY
解决:
rpm --import /etc/pki/rpm-gpg/RPM*
############################################################################
cd /usr/share/logstash/
mkdir config
cp /etc/logstash/logstash.yml /usr/share/logstash/config/
cp /etc/logstash/log4j2.properties /usr/share/logstash/config/
######cd /etc/logstash/conf.d#######
vim input.conf
input{
#stdin{}
file{
path => "/opt/soft/sub-test.cm-inv.com_access_80.log"
start_position => "beginning" ####end(从末尾读)beginning(从头开始读)
stat_interval => 1
type => "access_log"
tags=> ["acc"]
}
}
filter{
grok{
match => [ "message" , "\[%{HTTPDATE:time_local}\] \"%{IP:client_ip}\" \"%{WORD:verb} (%{NOTSPACE:request}|-) (HTTP/%{NUMBER:http_version}|-)\" \"(%{GREEDYDATA:http_cookie}|-)\" \"(%{WORD:http_x_forword_for}|-)\" (%{GREEDYDATA:nomean}|-) (?<user>[a-zA-Z._-]+|-)(?<status>[0-9._-]+|-) (?:%{NUMBER:body_bytes_sent:int}|-) (%{BASE16FLOAT:request_time:float}|-) \"(%{GREEDYDATA:http_did}|-)\" \"(%{GREEDYDATA:http_x_up_calling_line_id}|-)\" \"(%{NOTSPACE:http_referrer}|-)\" \"%{GREEDYDATA:http_user_agent}\" (%{BASE16FLOAT}|-)(%{NUMBER:content_length}|-)(%{GREEDYDATA:upstream_addr}|-)"]
remove_field => [ "message","host","@version" ]
}
date{
match=>["timestamp","dd/MMM/yyyy:HH:mm:ss Z"]
target=>"@timestamp"
}
geoip{
source => "client_ip"
}
}
###########
output{
elasticsearch {
hosts => "192.168.125.129:9200"
index => "logstash-%{type}-%{+YYYY.MM.dd}"
}
stdout {codec=> rubydebug }
}
################
/usr/share/logstash/bin/logstash -f input.conf
bin/logstash -e 'input { stdin {} } output { stdout {codec=>rubydebug} }'
删除一个月的索引脚本
#!/bin/bash
# Delete Elasticsearch indices that are one month old.
# NOTE: the logstash output above names indices "logstash-<type>-YYYY.MM.dd"
# (dot-separated, see index => "logstash-%{type}-%{+YYYY.MM.dd}"), so the
# date here must use dots too — the original "+%Y-%m-%d" (dashes) could
# never match any index, and the DELETE silently removed nothing.
data=$(date -d '-1 months' +%Y.%m.%d)
# -sS: quiet but still print errors; --max-time bounds a hung connection.
curl -sS --max-time 30 -XDELETE "http://172.25.0.77:9200/*-${data}"