Linux服务器性能日志收集和分析脚本(转)
最近老大要求分析服务器的性能数据,找到服务器运行的性能瓶颈,结果花了两天时间,写了两个脚本可以生成日志并可以进行数据提取,最终生成数据可以放到excel生成报表。过程中也学到了不少shell编程技术。
收集性能数据系统日志,每3秒收集一次,将脚本放到后台运行就行。
#!/bin/sh
# Collect system performance logs in an endless loop; run in the background.
# Each pass takes roughly 3 seconds: the two sar calls sample for 1 second
# each, plus the final "sleep 1".
LOG_DIR=/var/log
# Rotate once jciostat.log exceeds this many bytes (100MB).
# NOTE: the original comment claimed 200MB while the check was 100MB.
MAX_SIZE=$((100*1024*1024))
while :
do
    iostat -x -t >> "$LOG_DIR/jciostat.log"
    vmstat -t -S M >> "$LOG_DIR/jcvmstat.log"
    free -g >> "$LOG_DIR/jcfree_g.log"
    top -b -n 1 | head -5 >> "$LOG_DIR/jctop.log"
    sar -P ALL 1 1 | grep : | grep all | cut -d: -f2 >> "$LOG_DIR/jccpu.log"
    sar -n DEV 1 1 | grep : | cut -d: -f2 >> "$LOG_DIR/jcnetwork.log"
    if [ -f "$LOG_DIR/jciostat.log" ] && [ "$(stat -c "%s" "$LOG_DIR/jciostat.log")" -gt "$MAX_SIZE" ];then
        # Log rotation: archive and truncate ALL six logs.
        # Bug fix: the original archived/truncated only four of them, so
        # jccpu.log and jcnetwork.log grew without bound.
        cd "$LOG_DIR" > /dev/null 2>&1
        tar czf jc.log.tar.gz jciostat.log jcvmstat.log jcfree_g.log jctop.log jccpu.log jcnetwork.log > /dev/null 2>&1
        echo "" > jciostat.log
        echo "" > jcvmstat.log
        echo "" > jcfree_g.log
        echo "" > jctop.log
        echo "" > jccpu.log
        echo "" > jcnetwork.log
        cd - > /dev/null 2>&1
    fi
    sleep 1
done
日志文件分析脚本
#!/bin/sh
# Analyse the logs produced by the collector script: extract the requested
# columns and merge them into a CSV that Excel can chart.

# Print usage information.
# Bug fix: "use age" typo corrected to "Usage", and the hard-coded script
# name in the example replaced by $0 (consistent with the first line).
print_help()
{
    echo "Usage: $0 -day <day> -start <start time> -end <end time> -<option1> <colum1,colum2...> -<option2> <colum1,colum2...> -<option3> <colum1,colum2...>"
    echo "day: YYYY-MM-DD"
    echo "start time:HH:MM:SS"
    echo "end time:HH:MM:SS"
    echo " 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17"
    echo "-vmstat: r b swpd free buff cache si so bi bo in cs us sy id wa st"
    echo "-sda: rrqm/s wrqm/s r/s w/s rsec/s wsec/s avgrq-sz avgqu-sz await svctm %util"
    echo "-sdb: rrqm/s wrqm/s r/s w/s rsec/s wsec/s avgrq-sz avgqu-sz await svctm %util"
    echo "-network rxpck/s txpck/s rxkB/s txkB/s rxcmp/s txcmp/s rxmcst/s"
    echo "-cpu us ni sy wa st id"
    echo "-mem: total used free shared buffers cached"
    echo "-swap: total used free"
    echo "-la(load average): 5min 10min 15min"
    echo "-network <netdev:[cloudbr0/bond0/eth0...]> <colum1,colum2...>"
    echo "example:$0 -sda 1,2,3 -sdb 10,11,12 -network cloudbr0 2,3,4 -swap 3,4 -day 2016-07-08 -start 07:00:00 -end 08:00:00"
}
# Work on local copies of the collected logs.
cp /var/log/jc*.log ./

# Values filled in from the command line; empty means "not requested".
day=""
start=""
end=""
vmstat=""
sda=""
sdb=""
mem=""
swap=""
la=""
cpu=""
netdev=""
network=""

# Parse the command line; every recognised option stores its value in the
# matching global above. Extracted into a function so it can be tested.
# Bug fix: the unknown-option diagnostic now goes to stderr instead of
# polluting stdout.
parse_args()
{
    while [ -n "$1" ]
    do
        case "$1" in
            "-vmstat")  vmstat=$2;  shift ;;
            "-sda")     sda=$2;     shift ;;
            "-sdb")     sdb=$2;     shift ;;
            "-mem")     mem=$2;     shift ;;
            "-swap")    swap=$2;    shift ;;
            "-la")      la=$2;      shift ;;
            "-day")     day=$2;     shift ;;
            "-start")   start=$2;   shift ;;
            "-end")     end=$2;     shift ;;
            "-cpu")     cpu=$2;     shift ;;
            # -network takes two values: the interface name and the columns
            "-network") netdev=$2;  network=$3; shift; shift ;;
            "--help")   print_help; exit 0 ;;
            *)          echo "$1 is not an option" >&2 ;;
        esac
        shift
    done
}
parse_args "$@"
# ---------------------------------------------------------------------------
# Step 1: produce one .csv1 file per requested log section.
#
# The original script repeated the same column-selection logic eight times
# and generated a throw-away helper script for every gawk call; both are
# folded into the two helpers below.
#
# build_columns <spec> <suffix>
#   <spec>   - comma-separated 1-based column numbers chosen by the user
#   <suffix> - appended to every header name (used to tag -network columns)
# Reads the global array colum_name (index 0 holds the grep keyword) and
# sets two globals:
#   o_colum      - gawk field list, e.g.  $1","$3
#   o_colum_name - matching CSV header, e.g.  vmstat_r,vmstat_swpd
# ---------------------------------------------------------------------------
build_columns()
{
    local spec="$1" suffix="$2" c cols old_ifs
    o_colum=""
    o_colum_name=""
    old_ifs="$IFS"
    IFS=","
    cols=($spec)
    IFS="$old_ifs"
    for c in "${cols[@]}"
    do
        # skip out-of-range columns and gaps in the name table
        # (range is now checked BEFORE the array access)
        if [ "$c" -ge "${#colum_name[@]}" ] || [ -z "${colum_name[$c]}" ];then
            continue
        fi
        o_colum=${o_colum}\$$c\",\"
        o_colum_name=${o_colum_name}${colum_name[$c]}${suffix},
    done
    o_colum=${o_colum%\",\"}
    o_colum_name=${o_colum_name%,}
}

# extract_csv <spec> <suffix> <outfile> <logfile>
# Greps colum_name[0] out of <logfile> and writes the selected columns to
# <outfile> (header first). Double quotes around the gawk program let the
# shell substitute $o_colum in a single expansion, so the original trick of
# generating and executing a temporary script is unnecessary.
extract_csv()
{
    local spec="$1" suffix="$2" out="$3" log="$4"
    build_columns "$spec" "$suffix"
    echo "$o_colum_name" > "$out"
    grep "${colum_name[0]}" "$log" | gawk "{print $o_colum}" >> "$out"
}

if [ -n "$vmstat" ];then
    colum_name=("CST" "vmstat_r" "vmstat_b" "vmstat_swpd" "vmstat_free" "vmstat_buff" "vmstat_cache" "vmstat_si" "vmstat_so" "vmstat_bi" "vmstat_bo" "vmstat_in" "vmstat_cs" "vmstat_us" "vmstat_sy" "vmstat_id" "vmstat_wa" "vmstat_st")
    extract_csv "$vmstat" "" vmstat.csv1 jcvmstat.log
fi
if [ -n "$sda" ];then
    colum_name=("sda" "" "sda_rrqm/s" "sda_wrqm/s" "sda_r/s" "sda_w/s" "sda_rsec/s" "sda_wsec/s" "sda_avgrq-sz" "sda_avgqu-sz" "sda_await" "sda_svctm" "sda_%util")
    extract_csv "$sda" "" sda_io.csv1 jciostat.log
fi
if [ -n "$sdb" ];then
    colum_name=("sdb" "" "sdb_rrqm/s" "sdb_wrqm/s" "sdb_r/s" "sdb_w/s" "sdb_rsec/s" "sdb_wsec/s" "sdb_avgrq-sz" "sdb_avgqu-sz" "sdb_await" "sdb_svctm" "sdb_%util")
    extract_csv "$sdb" "" sdb_io.csv1 jciostat.log
fi
if [ -n "$mem" ];then
    colum_name=("Mem" "" "mem_total" "mem_used" "mem_free" "shared" "buffers" "cached")
    extract_csv "$mem" "" mem_used.csv1 jcfree_g.log
fi
if [ -n "$swap" ];then
    colum_name=("Swap" "" "swap_total" "swap_used" "swap_free")
    extract_csv "$swap" "" swap_used.csv1 jcfree_g.log
fi
if [ -n "$la" ];then
    colum_name=("load average" "load_5min" "load_10min" "load_15min")
    # Bug fix: the original forgot the "," separator in this branch, so
    # requesting more than one load column ran the values together.
    # The top output needs pre-filtering with cut before gawk sees it.
    build_columns "$la" ""
    echo "$o_colum_name" > load.csv1
    grep "${colum_name[0]}" jctop.log | cut -d, -f3,4,5 | cut -d: -f2 | gawk "{print $o_colum}" >> load.csv1
fi
if [ -n "$cpu" ];then
    colum_name=("all" "" "us" "ni" "sy" "wa" "st" "id")
    extract_csv "$cpu" "" cpu.csv1 jccpu.log
fi
if [ -n "$network" ];then
    colum_name=("" "" "rxpck/s" "txpck/s" "rxkB/s" "txkB/s" "rxcmp/s" "txcmp/s" "rxmcst/s")
    # the grep keyword is the interface name, not colum_name[0]
    build_columns "$network" "_${netdev}"
    echo "$o_colum_name" > network.csv1
    grep "$netdev" jcnetwork.log | gawk "{print $o_colum}" >> network.csv1
fi
# Step 2, part 1: emit the timestamp column.
# vmstat -t prints the date and time in fields 18 and 19 of lines matching
# the "CST" timezone marker. (Original used the accidental-but-working
# quoting gawk {'...'}; fixed to a properly quoted program.)
echo time > time.csv1
grep "CST" jcvmstat.log | gawk '{print $18"/"$19}' >> time.csv1

# Step 2, part 2: paste every section's .csv1 side by side, timestamp first.
# Joins the files row-wise with gawk; intermediate results live in
# tmp*.csv2 and the final table becomes result.csv.
# The original parsed "ls *.csv1 | grep -v" output; a plain glob is safer.
merge_csv1()
{
    local f prev=time.csv1 idx=0
    for f in *.csv1
    do
        # skip the timestamp file itself and an unmatched glob pattern
        if [ "$f" = "time.csv1" ] || [ ! -f "$f" ];then
            continue
        fi
        # some sections leave a trailing comma on each row - drop it
        sed -i 's/,$//g' "$f"
        gawk 'NR==FNR{a[FNR]=$0;next}{print a[FNR]","$0;next}' "$prev" "$f" > "tmp$idx.csv2"
        prev="tmp$idx.csv2"
        idx=$((idx+1))
    done
    # Unlike the original (which failed with "mv tmp-1.csv2"), this also
    # works when no section was requested: result.csv then holds just the
    # time column.
    mv "$prev" result.csv
    # blank out the "time" header cell so Excel treats row 1 as labels only
    sed -i 's/time/ /g' result.csv
}
merge_csv1
# Step 3: optionally narrow result.csv down to one day and a time window.
# filter_result <day> <start> <end>
#   <day>          YYYY-MM-DD (empty: skip filtering entirely)
#   <start>/<end>  HH:MM:SS   (both empty: keep the whole day)
# Writes <day>.csv, and <day>-<start>-<end>.csv when a window is given.
# Fixes over the original: "foramt" typos corrected; the time error message
# now shows the HH:MM:SS format the regex actually requires; rows are read
# with "while read" instead of word-splitting $(cat ...); the header row is
# skipped by an explicit numeric guard instead of relying on expr failing.
filter_result()
{
    local day="$1" start="$2" end="$3"
    local date_str st et stn etn filename line ctn
    [ -n "$day" ] || return 0
    date_str=$(echo "$day" | grep -E '^[0-9]{4}-[0-9]{2}-[0-9]{2}')
    if [ -z "$date_str" ];then
        echo "Date format error. Please input YYYY-MM-DD"
        return 1
    fi
    head -1 result.csv > "$date_str.csv"
    grep "$date_str" result.csv >> "$date_str.csv"
    sed -i 's/ //g' "$date_str.csv"
    if [ -n "$start" ] && [ -n "$end" ];then
        st=$(echo "$start" | grep -E '^[0-9]{2}:[0-9]{2}:[0-9]{2}')
        et=$(echo "$end" | grep -E '^[0-9]{2}:[0-9]{2}:[0-9]{2}')
        if [ -z "$st" ] || [ -z "$et" ];then
            echo "Time format error. Please input HH:MM:SS"
            return 1
        fi
        stn=$(echo "$st" | sed 's/://g')
        etn=$(echo "$et" | sed 's/://g')
        filename="${date_str}-${stn}-${etn}.csv"
        head -1 "$date_str.csv" > "$filename"
        while IFS= read -r line
        do
            # timestamp cell looks like YYYY-MM-DD/HH:MM:SS; keep HHMMSS
            ctn=$(echo "$line" | cut -d',' -f1 | cut -d'/' -f2 | sed 's/://g')
            # skip the header and malformed rows
            case "$ctn" in
                ''|*[!0-9]*) continue ;;
            esac
            # 10# forces base-10: leading zeros would otherwise be octal
            if [ $((10#$ctn)) -gt $((10#$stn)) ] && [ $((10#$ctn)) -lt $((10#$etn)) ];then
                echo "$line" >> "$filename"
            fi
        done < "$date_str.csv"
    fi
}
filter_result "$day" "$start" "$end"
# clean up all intermediate files and the local log copies
rm -rf *.csv1 *.csv2 jc*.log
要生成 2016年7月8日 早上7点到8点之间内存的used和cache,swap的used和free,sda磁盘的%util 可以使用如下命令:
./analyz.sh -swap 3,4 -sda 12 -mem 3,7 -day 2016-07-08 -start 07:00:00 -end 08:00:00
将生成的csv文件用excel打开,就可以使用图表功能生成出性能曲线。
Linux服务器性能日志收集和分析脚本(转)的更多相关文章
- 在linux服务器下日志提取的python脚本(实现输入开始时间和结束时间打包该时间段内的文件)
1.需求:近期在提取linux服务器下的日志文件时总是需要人工去找某个时间段内的日志文件,很是枯燥乏味,于是乎,我就想着用python结合linux指令来写一个日志提取的脚本,于是就有了以下脚本文件: ...
- Linux服务器性能查看分析调优
一 linux服务器性能查看 1.1 cpu性能查看 1.查看物理cpu个数: cat /proc/cpuinfo |grep "physical id"|sort|uniq|wc ...
- Linux服务器性能分析与调优
一 linux服务器性能查看 1.1 cpu性能查看 1.查看物理cpu个数: cat /proc/cpuinfo |grep "physical id"|sort|uniq|wc ...
- Linux下单机部署ELK日志收集、分析环境
一.ELK简介 ELK是elastic 公司旗下三款产品ElasticSearch .Logstash .Kibana的首字母组合,主要用于日志收集.分析与报表展示. ELK Stack包含:Elas ...
- 20个Linux服务器性能调优技巧
Linux是一种开源操作系统,它支持各种硬件平台,Linux服务器全球知名,它和Windows之间最主要的差异在于,Linux服务器默认情况下一般不提供GUI(图形用户界面),而是命令行界面,它的主要 ...
- 【转】linux服务器性能查看
转载自https://blog.csdn.net/achenyuan/article/details/78974729 1.1 cpu性能查看 1.查看物理cpu个数: cat /proc/cpuin ...
- linux服务器性能查看
1.1 cpu性能查看 1.查看物理cpu个数: cat /proc/cpuinfo |grep "physical id"|sort|uniq|wc -l 2.查看每个物理cpu ...
- [转]20个你不得不知的Linux服务器性能调优技巧
Linux是一种开源操作系统,它支持各种硬件平台,Linux服务器全球知名,它和Windows之间最主要的差异在于,Linux服务器默认情况下一般不提供GUI(图形用户界面),而是命令行界面,它的主要 ...
- 检查Linux服务器性能
如果你的Linux服务器突然负载暴增,告警短信快发爆你的手机,如何在最短时间内找出Linux性能问题所在? 概述通过执行以下命令,可以在1分钟内对系统资源使用情况有个大致的了解. • uptime• ...
随机推荐
- 【log4j】springboot项目启动 ,使用的druid数据源,log4j报错 log4j:WARN Please initialize the log4j system properly.
springboot项目启动 ,使用的druid数据源,log4j报错 -- :: --- [ restartedMain] o.hibernate.annotations.common.Versio ...
- DOM系统学习-基础
DOM介绍 DOM介绍: D 网页文档 O 对象,可以调用属性和方法 M 网页文档的树型结构 节点: DOM将树型结构理解为由节点组成. 节点种类: 元素节点.文本节点.属性节点等 查找元 ...
- ES6里关于作用域的拓展:块级作用域
过去,javascript缺乏块级作用域,var声明时的声明提升.属性变量等行为让人困惑.ES6的新语法可以帮助我们更好地控制作用域. 一.var声明 1.变量提升:var声明会发生“变量提升”现象, ...
- Python-爬虫-针对有frame框架的页面
有的页面会使用frame 框架,使用Selenium + PhantomJS 后并不会加载iframe 框架中的网页内容.iframe 框架相当于在页面中又加载了一个页面,需要使用Selenium 的 ...
- 转:mybatis3中@SelectProvider的使用技巧
mybatis3中@SelectProvider的使用技巧 mybatis的原身是ibatis,现在已经脱离了apache基金会,新官网是http://www.mybatis.org/. mybati ...
- 在web目录下 批量寻找配置文件信息
dir /s /b *.php *.inc *.conf *.config >>list.txt" W4 I2 U+ N/ B6 K @0 r r8 ^ T00LS: _$ j! ...
- C++ 11 可变模板参数的两种展开方式
#include <iostream> #include <string> #include <stdint.h> template<typename T&g ...
- Linux(Centos)——下升级python3.3
CentOS下的Python版本一般都比较低,很多应用都需要升级python来完成.我装的centOS的默认的python版本是V2.4.3,但运行node.js需要的版本是2.5以上. 1.下载py ...
- STL学习笔记(迭代器配接器)
Reverse(逆向)迭代器 Reverse迭代器是一种配接器. 重新定义递增运算和递减运算.使其行为正好倒置. 如果你使用这类迭代器,算法将以逆向次序处理元素.所有标准容器都允许使用Reverse迭 ...
- 工作总结 1 sql写法 insert into select from 2 vs中 obj文件和bin文件 3 npoi 模板copy CopySheet 最好先全部Copy完后 再根据生成sheet写数据 4 sheet.CopyRow(rowsindex, rowsindex + x); 5 npoi 复制模板如果出现单元格显示问题
我们可以从一个表中复制所有的列插入到另一个已存在的表中: INSERT INTO table2SELECT * FROM table1; 或者我们可以只复制希望的列插入到另一个已存在的表中: INSE ...