Linux服务器性能日志收集和分析脚本(转)
最近老大要求分析服务器的性能数据,找到服务器运行的性能瓶颈,结果花了两天时间,写了两个脚本可以生成日志并可以进行数据提取,最终生成数据可以放到excel生成报表。过程中也学到了不少shell编程技术。
收集性能数据系统日志,每3秒收集一次,将脚本放到后台运行就行。
#!/bin/sh
# Collect system performance logs forever; run this script in the background
# (e.g. `nohup ./collect.sh &`).  One sample roughly every 3 seconds: the two
# sar calls each block for 1 second, plus the trailing `sleep 1`.
while :
do
    iostat -x -t >> /var/log/jciostat.log                                  # per-device I/O statistics
    vmstat -t -S M >> /var/log/jcvmstat.log                                # memory/swap/cpu summary (MB), with timestamp
    free -g >> /var/log/jcfree_g.log                                       # memory usage in GB
    top -b -n 1 | head -5 >> /var/log/jctop.log                            # header incl. load averages
    sar -P ALL 1 1 | grep : | grep all | cut -d: -f2 >> /var/log/jccpu.log # overall CPU utilisation
    sar -n DEV 1 1 | grep : | cut -d: -f2 >> /var/log/jcnetwork.log        # per-interface network throughput
    if [ -f "/var/log/jciostat.log" ];then
        # Rotate once the iostat log exceeds 100MB (the original comment said
        # 200MB, but the test below is 100*1024*1024 bytes): archive ALL
        # collected logs -- including jccpu.log and jcnetwork.log, which the
        # original forgot -- then truncate them.
        if [ "$(stat -c "%s" /var/log/jciostat.log)" -gt $((100*1024*1024)) ];then
            cd /var/log/ >/dev/null 2>&1
            tar czvf jc.log.tar.gz jciostat.log jcvmstat.log jcfree_g.log jctop.log jccpu.log jcnetwork.log > /dev/null 2>&1
            # `: >` truncates to zero bytes; `echo "" >` would leave a blank line.
            : > /var/log/jciostat.log
            : > /var/log/jcvmstat.log
            : > /var/log/jcfree_g.log
            : > /var/log/jctop.log
            : > /var/log/jccpu.log
            : > /var/log/jcnetwork.log
            cd - > /dev/null 2>&1
        fi
    fi
    sleep 1
done
日志文件分析脚本
#!/bin/sh
# Print usage information for analyz.sh.  Column numbers refer to
# whitespace-separated fields of the raw log lines; pass them as a
# comma-separated list to the matching option.
print_help()
{
    echo "usage: analyz.sh -day <day> -start <start time> -end <end time> -<option1> <colum1,colum2...> -<option2> <colum1,colum2...> -<option3> <colum1,colum2...>"
    echo "day: YYYY-MM-DD"
    echo "start time:HH:MM:SS"
    echo "end time:HH:MM:SS"
    echo " 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17"
    echo "-vmstat: r b swpd free buff cache si so bi bo in cs us sy id wa st"
    echo "-sda: rrqm/s wrqm/s r/s w/s rsec/s wsec/s avgrq-sz avgqu-sz await svctm %util"
    echo "-sdb: rrqm/s wrqm/s r/s w/s rsec/s wsec/s avgrq-sz avgqu-sz await svctm %util"
    echo "-network rxpck/s txpck/s rxkB/s txkB/s rxcmp/s txcmp/s rxmcst/s"
    echo "-cpu us ni sy wa st id"
    echo "-mem: total used free shared buffers cached"
    echo "-swap: total used free"
    # top reports load averages over 1, 5 and 15 minutes (the original help
    # wrongly said 5/10/15).
    echo "-la(load average): 1min 5min 15min"
    echo "-network <netdev:[cloudbr0/bond0/eth0...]> <colum1,colum2...>"
    echo "example:$0 -sda 1,2,3 -sdb 10,11,12 -network cloudbr0 2,3,4 -swap 3,4 -day 2016-07-08 -start 07:00:00 -end 08:00:00"
}
# Work on local copies of the collected logs; everything below reads the
# jc*.log files from the current directory.
cp /var/log/jc*.log ./
# Requested column lists, one variable per metric.  Each holds a
# comma-separated list of 1-based awk field numbers; empty means the
# metric was not requested on the command line.
day=""
start=""
end=""
vmstat=""
sda=""
sdb=""
mem=""
swap=""
la=""
cpu=""
network=""
netdev=""
# Parse command-line options.  Each arm consumes its value with an extra
# `shift`; the `shift` at the bottom of the loop drops the option itself.
while [ -n "$1" ]
do
case "$1" in
"-vmstat")
vmstat=$2
shift
;;
"-sda")
sda=$2
shift
;;
"-sdb")
sdb=$2
shift
;;
"-mem")
mem=$2
shift
;;
"-swap")
swap=$2
shift
;;
"-la")
la=$2
shift
;;
"-day")
day=$2
shift
;;
"-start")
start=$2
shift
;;
"-end")
end=$2
shift
;;
"-cpu")
cpu=$2
shift
;;
# -network takes TWO values: the device name, then the column list.
"-network")
netdev=$2
network=$3
shift
shift
;;
"--help")
print_help
exit 0
;;
*)
echo "$1 is not an option"
;;
esac
shift
done
# ---- Step 1: produce one .csv1 file per requested metric.
# vmstat: extract the requested columns from jcvmstat.log into vmstat.csv1.
if [ -n "$vmstat" ];then
    # Index 0 is the grep keyword; indexes 1..17 name the selectable columns.
    colum_name=("CST" "vmstat_r" "vmstat_b" "vmstat_swpd" "vmstat_free" "vmstat_buff" "vmstat_cache" "vmstat_si" "vmstat_so" "vmstat_bi" "vmstat_bo" "vmstat_in" "vmstat_cs" "vmstat_us" "vmstat_sy" "vmstat_id" "vmstat_wa" "vmstat_st")
    OLD_IFS="$IFS"
    IFS=","
    colums=($vmstat)
    IFS="$OLD_IFS"
    o_colum=""
    o_colum_name=""
    for c in "${colums[@]}"
    do
        # Skip column numbers that are out of range or have no name.
        if [ -z "${colum_name[$c]}" ] || [ "$c" -ge "${#colum_name[@]}" ];then
            continue
        fi
        # Build the awk output list ($N","$N...) and the CSV header.
        o_colum=${o_colum}\$$c\",\"
        o_colum_name=${o_colum_name}${colum_name[$c]},
    done
    # Strip the trailing `","` from the awk expression and the trailing
    # comma from the header.
    o_colum=${o_colum%\"}
    o_colum=${o_colum%,}
    o_colum=${o_colum%\"}
    o_colum_name=${o_colum_name%,}
    echo "$o_colum_name" > vmstat.csv1
    # Pass the dynamically built program text to gawk as a single argument;
    # the original's temporary-script workaround is unnecessary.
    awk_prog="{print $o_colum}"
    grep "${colum_name[0]}" jcvmstat.log | gawk "$awk_prog" >> vmstat.csv1
fi
# sda: extract the requested columns from jciostat.log into sda_io.csv1.
if [ -n "$sda" ];then
    # Index 0 is the grep keyword (device name); index 1 is blank because awk
    # field 1 of an iostat line is the device name itself.
    colum_name=("sda" "" "sda_rrqm/s" "sda_wrqm/s" "sda_r/s" "sda_w/s" "sda_rsec/s" "sda_wsec/s" "sda_avgrq-sz" "sda_avgqu-sz" "sda_await" "sda_svctm" "sda_%util")
    OLD_IFS="$IFS"
    IFS=","
    colums=($sda)
    IFS="$OLD_IFS"
    o_colum=""
    o_colum_name=""
    for c in "${colums[@]}"
    do
        # Skip column numbers that are out of range or have no name.
        if [ -z "${colum_name[$c]}" ] || [ "$c" -ge "${#colum_name[@]}" ];then
            continue
        fi
        o_colum=${o_colum}\$$c\",\"
        o_colum_name=${o_colum_name}${colum_name[$c]},
    done
    # Trim the trailing `","` / comma left by the loop above.
    o_colum=${o_colum%\"}
    o_colum=${o_colum%,}
    o_colum=${o_colum%\"}
    o_colum_name=${o_colum_name%,}
    echo "$o_colum_name" > sda_io.csv1
    # Run gawk directly with the built program; no temp script needed.
    awk_prog="{print $o_colum}"
    grep "${colum_name[0]}" jciostat.log | gawk "$awk_prog" >> sda_io.csv1
fi
# sdb: extract the requested columns from jciostat.log into sdb_io.csv1.
if [ -n "$sdb" ];then
    # Index 0 is the grep keyword (device name); index 1 is blank because awk
    # field 1 of an iostat line is the device name itself.
    colum_name=("sdb" "" "sdb_rrqm/s" "sdb_wrqm/s" "sdb_r/s" "sdb_w/s" "sdb_rsec/s" "sdb_wsec/s" "sdb_avgrq-sz" "sdb_avgqu-sz" "sdb_await" "sdb_svctm" "sdb_%util")
    OLD_IFS="$IFS"
    IFS=","
    colums=($sdb)
    IFS="$OLD_IFS"
    o_colum=""
    o_colum_name=""
    for c in "${colums[@]}"
    do
        # Skip column numbers that are out of range or have no name.
        if [ -z "${colum_name[$c]}" ] || [ "$c" -ge "${#colum_name[@]}" ];then
            continue
        fi
        o_colum=${o_colum}\$$c\",\"
        o_colum_name=${o_colum_name}${colum_name[$c]},
    done
    # Trim the trailing `","` / comma left by the loop above.
    o_colum=${o_colum%\"}
    o_colum=${o_colum%,}
    o_colum=${o_colum%\"}
    o_colum_name=${o_colum_name%,}
    echo "$o_colum_name" > sdb_io.csv1
    # Run gawk directly with the built program; no temp script needed.
    awk_prog="{print $o_colum}"
    grep "${colum_name[0]}" jciostat.log | gawk "$awk_prog" >> sdb_io.csv1
fi
# mem: extract the requested columns of the "Mem:" line of `free -g`
# (jcfree_g.log) into mem_used.csv1.
if [ -n "$mem" ];then
    # Index 0 is the grep keyword; index 1 is blank because awk field 1 of a
    # free(1) line is the "Mem:" label itself.
    colum_name=("Mem" "" "mem_total" "mem_used" "mem_free" "shared" "buffers" "cached")
    OLD_IFS="$IFS"
    IFS=","
    colums=($mem)
    IFS="$OLD_IFS"
    o_colum=""
    o_colum_name=""
    for c in "${colums[@]}"
    do
        # Skip column numbers that are out of range or have no name.
        if [ -z "${colum_name[$c]}" ] || [ "$c" -ge "${#colum_name[@]}" ];then
            continue
        fi
        o_colum=${o_colum}\$$c\",\"
        o_colum_name=${o_colum_name}${colum_name[$c]},
    done
    # Trim the trailing `","` / comma left by the loop above.
    o_colum=${o_colum%\"}
    o_colum=${o_colum%,}
    o_colum=${o_colum%\"}
    o_colum_name=${o_colum_name%,}
    echo "$o_colum_name" > mem_used.csv1
    # Run gawk directly with the built program; no temp script needed.
    awk_prog="{print $o_colum}"
    grep "${colum_name[0]}" jcfree_g.log | gawk "$awk_prog" >> mem_used.csv1
fi
# swap: extract the requested columns of the "Swap:" line of `free -g`
# (jcfree_g.log) into swap_used.csv1.
if [ -n "$swap" ];then
    # Index 0 is the grep keyword; index 1 is blank because awk field 1 of a
    # free(1) line is the "Swap:" label itself.
    colum_name=("Swap" "" "swap_total" "swap_used" "swap_free")
    OLD_IFS="$IFS"
    IFS=","
    colums=($swap)
    IFS="$OLD_IFS"
    o_colum=""
    o_colum_name=""
    for c in "${colums[@]}"
    do
        # Skip column numbers that are out of range or have no name.
        if [ -z "${colum_name[$c]}" ] || [ "$c" -ge "${#colum_name[@]}" ];then
            continue
        fi
        o_colum=${o_colum}\$$c\",\"
        o_colum_name=${o_colum_name}${colum_name[$c]},
    done
    # Trim the trailing `","` / comma left by the loop above.
    o_colum=${o_colum%\"}
    o_colum=${o_colum%,}
    o_colum=${o_colum%\"}
    o_colum_name=${o_colum_name%,}
    echo "$o_colum_name" > swap_used.csv1
    # Run gawk directly with the built program; no temp script needed.
    awk_prog="{print $o_colum}"
    grep "${colum_name[0]}" jcfree_g.log | gawk "$awk_prog" >> swap_used.csv1
fi
# la: extract the load averages from the top header lines (jctop.log) into
# load.csv1.  top reports 1, 5 and 15 minute averages, so the headers are
# named accordingly (the original mislabelled them 5/10/15 min).
if [ -n "$la" ];then
    # Index 0 is the grep keyword; 1..3 name the three load-average fields.
    colum_name=("load average" "load_1min" "load_5min" "load_15min")
    OLD_IFS="$IFS"
    IFS=","
    colums=($la)
    IFS="$OLD_IFS"
    o_colum=""
    o_colum_name=""
    for c in "${colums[@]}"
    do
        # Skip column numbers that are out of range or have no name.
        if [ -z "${colum_name[$c]}" ] || [ "$c" -ge "${#colum_name[@]}" ];then
            continue
        fi
        # No "," separator here: the cut output below still carries the commas
        # from top's "load average: a, b, c" text, so concatenating awk fields
        # already yields comma-separated values.
        o_colum=${o_colum}\$$c
        o_colum_name=${o_colum_name}${colum_name[$c]},
    done
    o_colum_name=${o_colum_name%,}
    echo "$o_colum_name" > load.csv1
    # cut -d, -f3,4,5 keeps the tail of the top header containing
    # "load average: ..."; cut -d: -f2 drops the leading label.
    awk_prog="{print $o_colum}"
    grep "${colum_name[0]}" jctop.log | cut -d, -f3,4,5 | cut -d: -f2 | gawk "$awk_prog" >> load.csv1
fi
# cpu: extract the requested columns of the sar "all" lines (jccpu.log) into
# cpu.csv1.
if [ -n "$cpu" ];then
    # Index 0 is the grep keyword; index 1 is blank because awk field 1 of a
    # jccpu.log line is the "all" label itself.
    colum_name=("all" "" "us" "ni" "sy" "wa" "st" "id")
    OLD_IFS="$IFS"
    IFS=","
    colums=($cpu)
    IFS="$OLD_IFS"
    o_colum=""
    o_colum_name=""
    for c in "${colums[@]}"
    do
        # Skip column numbers that are out of range or have no name.
        if [ -z "${colum_name[$c]}" ] || [ "$c" -ge "${#colum_name[@]}" ];then
            continue
        fi
        o_colum=${o_colum}\$$c\",\"
        o_colum_name=${o_colum_name}${colum_name[$c]},
    done
    # Trim the trailing `","` / comma left by the loop above.
    o_colum=${o_colum%\"}
    o_colum=${o_colum%,}
    o_colum=${o_colum%\"}
    o_colum_name=${o_colum_name%,}
    echo "$o_colum_name" > cpu.csv1
    # Run gawk directly with the built program; no temp script needed.
    awk_prog="{print $o_colum}"
    grep "${colum_name[0]}" jccpu.log | gawk "$awk_prog" >> cpu.csv1
fi
# network: extract the requested columns for device $netdev from
# jcnetwork.log into network.csv1.  Header names are suffixed with the
# device name so several devices can be told apart in the final report.
if [ -n "$network" ];then
    # Indexes 0 and 1 are blank: the grep keyword is $netdev (user supplied),
    # and awk field 1 of a jcnetwork.log line is the device name itself.
    colum_name=("" "" "rxpck/s" "txpck/s" "rxkB/s" "txkB/s" "rxcmp/s" "txcmp/s" "rxmcst/s")
    OLD_IFS="$IFS"
    IFS=","
    colums=($network)
    IFS="$OLD_IFS"
    o_colum=""
    o_colum_name=""
    for c in "${colums[@]}"
    do
        # Skip column numbers that are out of range or have no name.
        if [ -z "${colum_name[$c]}" ] || [ "$c" -ge "${#colum_name[@]}" ];then
            continue
        fi
        o_colum=${o_colum}\$$c\",\"
        o_colum_name=${o_colum_name}${colum_name[$c]}_${netdev},
    done
    # Trim the trailing `","` / comma left by the loop above.
    o_colum=${o_colum%\"}
    o_colum=${o_colum%,}
    o_colum=${o_colum%\"}
    o_colum_name=${o_colum_name%,}
    echo "$o_colum_name" > network.csv1
    # Run gawk directly with the built program; no temp script needed.
    awk_prog="{print $o_colum}"
    grep "$netdev" jcnetwork.log | gawk "$awk_prog" >> network.csv1
fi
# ---- Step 2: emit the timestamp column and join all metric CSVs onto it.
# vmstat -t lines end with date ($18) and time ($19); join them as "date/time".
echo time > time.csv1
grep "CST" jcvmstat.log | gawk '{print $18"/"$19}' >> time.csv1
# Column-wise merge: repeatedly paste the next .csv1 onto the running result.
i=0 # index of the next temporary merge file
j=0 # index of the previous temporary merge file
for f in *.csv1
do
    # Iterate the glob directly instead of parsing `ls`; skip the time column
    # itself and the unmatched-glob literal.
    [ "$f" = "time.csv1" ] && continue
    [ -f "$f" ] || continue
    # Drop any trailing comma left on data rows.
    sed -i 's/,$//g' "$f"
    if [ "$i" -eq 0 ];then # first metric: paste onto the time column
        gawk 'NR==FNR{a[FNR]=$0;next}{print a[FNR]","$0;next}' time.csv1 "$f" > tmp$j.csv2
        i=$((i+1))
    else # later metrics: paste onto the previous merge result
        gawk 'NR==FNR{a[FNR]=$0;next}{print a[FNR]","$0;next}' tmp$j.csv2 "$f" > tmp$i.csv2
        i=$((i+1))
        j=$((j+1))
    fi
done
# Guard against "no metric requested": the original tried `mv tmp-1.csv2`.
if [ "$i" -gt 0 ];then
    i=$((i-1))
    mv tmp$i.csv2 result.csv
    # Blank out the literal "time" header cell for Excel.
    sed -i 's/time/ /g' result.csv
fi
# ---- Step 3: optionally restrict result.csv to the requested day and
# time window, producing <day>.csv and <day>-<start>-<end>.csv.
if [ -n "$day" ];then
    date_str=$(echo "$day" | grep -E '^[0-9]{4}-[0-9]{2}-[0-9]{2}')
    if [ -n "$date_str" ];then
        # Per-day file: header row plus every row matching the date.
        head -1 result.csv > "$date_str.csv"
        grep "$date_str" result.csv >> "$date_str.csv"
        # Strip all spaces so each CSV row is a single word (relied on below).
        sed -i 's/ //g' "$date_str.csv"
        if [ -n "$start" ] && [ -n "$end" ];then
            st=$(echo "$start" | grep -E '^[0-9]{2}:[0-9]{2}:[0-9]{2}')
            et=$(echo "$end" | grep -E '^[0-9]{2}:[0-9]{2}:[0-9]{2}')
            if [ -n "$st" ] && [ -n "$et" ];then
                stn=$(echo "$st" | sed 's/://g')
                etn=$(echo "$et" | sed 's/://g')
                filename=${date_str}-${stn}-${etn}.csv
                head -1 "$date_str.csv" > "$filename"
                # Rows contain no spaces after the sed above, so word-splitting
                # the file content iterates one CSV row at a time.
                for line in $(cat "$date_str.csv")
                do
                    # HHMMSS of the row, e.g. "2016-07-08/07:00:01" -> 070001.
                    ctn=$(echo "$line" | cut -d',' -f1 | cut -d'/' -f2 | sed 's/://g')
                    if [ "$(expr "$ctn" + 0)" -gt "$(expr "$stn" + 0)" ] && [ "$(expr "$ctn" + 0)" -lt "$(expr "$etn" + 0)" ];then
                        echo "$line" >> "$filename"
                    fi
                done
            else
                # Fixed the original's typo ("foramt") and wrong separator hint.
                echo "Time format error.Please input HH:MM:SS"
            fi
        fi
    else
        echo "Date format error.Please input YYYY-MM-DD"
    fi
fi
# Clean up intermediate files and the local log copies.  These are plain
# files, so `rm -f` suffices; the original's `-r` was unnecessary.
rm -f -- *.csv1
rm -f -- *.csv2
rm -f -- jc*.log
要生成 2016年7月8日 早上7点到8点之间内存的used和cache,swap的used和free,sda磁盘的%util 可以使用如下命令:
./analyz.sh -swap 3,4 -sda 12 -mem 3,7 -day 2016-07-08 -start 07:00:00 -end 08:00:00
将生成的csv文件用excel打开,就可以使用图表功能生成出性能曲线。
Linux服务器性能日志收集和分析脚本(转)的更多相关文章
- 在linux服务器下日志提取的python脚本(实现输入开始时间和结束时间打包该时间段内的文件)
1.需求:近期在提取linux服务器下的日志文件时总是需要人工去找某个时间段内的日志文件,很是枯燥乏味,于是乎,我就想着用python结合linux指令来写一个日志提取的脚本,于是就有了以下脚本文件: ...
- Linux服务器性能查看分析调优
一 linux服务器性能查看 1.1 cpu性能查看 1.查看物理cpu个数: cat /proc/cpuinfo |grep "physical id"|sort|uniq|wc ...
- Linux服务器性能分析与调优
一 linux服务器性能查看 1.1 cpu性能查看 1.查看物理cpu个数: cat /proc/cpuinfo |grep "physical id"|sort|uniq|wc ...
- Linux下单机部署ELK日志收集、分析环境
一.ELK简介 ELK是elastic 公司旗下三款产品ElasticSearch .Logstash .Kibana的首字母组合,主要用于日志收集.分析与报表展示. ELK Stack包含:Elas ...
- 20个Linux服务器性能调优技巧
Linux是一种开源操作系统,它支持各种硬件平台,Linux服务器全球知名,它和Windows之间最主要的差异在于,Linux服务器默认情况下一般不提供GUI(图形用户界面),而是命令行界面,它的主要 ...
- 【转】linux服务器性能查看
转载自https://blog.csdn.net/achenyuan/article/details/78974729 1.1 cpu性能查看 1.查看物理cpu个数: cat /proc/cpuin ...
- linux服务器性能查看
1.1 cpu性能查看 1.查看物理cpu个数: cat /proc/cpuinfo |grep "physical id"|sort|uniq|wc -l 2.查看每个物理cpu ...
- [转]20个你不得不知的Linux服务器性能调优技巧
Linux是一种开源操作系统,它支持各种硬件平台,Linux服务器全球知名,它和Windows之间最主要的差异在于,Linux服务器默认情况下一般不提供GUI(图形用户界面),而是命令行界面,它的主要 ...
- 检查Linux服务器性能
如果你的Linux服务器突然负载暴增,告警短信快发爆你的手机,如何在最短时间内找出Linux性能问题所在? 概述通过执行以下命令,可以在1分钟内对系统资源使用情况有个大致的了解. • uptime• ...
随机推荐
- 决定干点事儿--翻译一下《effective modern c++》
写了非常多关于C++11的博客.总是认为不踏实,非常多东西都是东拼西凑.市场上也非常少有C++11的优秀书籍,但幸运的是Meyers老爷子并没有闲赋.为我们带来了<effective moder ...
- 2017.5.1 使用fat jar插件来打包有引用外部jar包的项目
如果在程序开发时用到了第三方提供的API.jar包或者其他附属资源.在导出并生成项目的jar文件时,必须将第三方的文件一并导出,否则无法正确运行. 可以使用fat jar插件,下载地址:http:// ...
- resin后台输出中文乱码的解决办法!
resin后台输出中文乱码的解决办法! 学习了:https://blog.csdn.net/kobeguang/article/details/34116429 编辑conf/resin.con文件: ...
- MyEclipse导入Hibernate出现Path must include project and resource;/project name
如图,在MyEclipse 2014以下版本中都没遇见这个问题. 在导入Hibernate框架的时候,可以说真的随缘,运气不好,明明配置全都没问题,还是连续几次失败,这个时候除了烧高香拜拜,也只能靠百 ...
- 【Javascript 基础】使用数组
Javascript 数组的工作方式与大多数编程语言的数组类似. <!DOCTYPE html> <html lang="en"> <head> ...
- 【Excle数据透视表】如何新建数据透视表样式
如果觉得Excle给出的数据透视表样式不符合自己的心意,可以自己定义一个数据透视表样式 步骤1 单击数据透视表区域任意单元格→数据透视表工具→设计→样式组中的下拉按钮,打开数据透视表样式库→新建数据透 ...
- C# 指南之装箱与拆箱
基础 1.值类型 1.1 在栈上分配内存,在声明时初始化才能使用,不能为null. 1.2 值类型超出作用范围系统自动释放内存. 1.3 主要由两类组成:结构,枚举 结构分为以下几类 1.整形(Sby ...
- 【原创】分布式之数据库和缓存双写一致性方案解析(三) 前端面试送命题(二)-callback,promise,generator,async-await JS的进阶技巧 前端面试送命题(一)-JS三座大山 Nodejs的运行原理-科普篇 优化设计提高sql类数据库的性能 简单理解token机制
[原创]分布式之数据库和缓存双写一致性方案解析(三) 正文 博主本来觉得,<分布式之数据库和缓存双写一致性方案解析>,一文已经十分清晰.然而这一两天,有人在微信上私聊我,觉得应该要采用 ...
- IBM Rational AppScan使用详细说明
转自:http://www.nxadmin.com/tools/675.html 本文将详细介绍Appscan功能选项设置的细节,适合E文一般,初次接触Appscan的童鞋参考阅读. Appscan是 ...
- C语言中的signal函数
signal是一个系统调用.是一种特殊的中断,当某种特定的"软件中断"发生时.用于调用的程序.中断通常是程序运行中出现的特殊情况,如引用特殊内存中的非法地址, 浮点数被0除. si ...