#!/usr/bin/python
# -*- coding: utf-8 -*-
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
#Name        : collMonitorDataToDB.py                           #
#Created     : 2017/07/06                                       #
#Author      : @ruiy                                            #
#Version     : 2.0                                              #
#Copyright   : 2016 ~ 2017 ahwater.net Corporation.             #
#Description : collect monitoring indicator data into the DB.   #
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#

import pyodbc
import sys
import os
import commands
import datetime
import paramiko
import re
import ConfigParser
import codecs
import chardet
#import psutil
#import sys
#reload(sys)
#sys.setdefaultencoding('utf8')

# Main columns of the monitoring table:
# id,timestramp,location,ip,hostname,port,port_est_counts,cpu_use_ratio
# mem_total,mem_free,mem_use_ratio
# disk_drive_c_total,disk_drive_c_free,disk_drive_c_use_ratio
# send_flow,recv_flow,send_packets,recv_packets

# Memory
# Total memory capacity (bytes):   wmic memorychip get capacity
# Free physical memory (kbytes):   wmic OS get FreePhysicalMemory
# Disk
# List physical disks:             wmic DISKDRIVE get deviceid,Caption,size,InterfaceType
# List logical partitions:         wmic LOGICALDISK get name,Description,filesystem,size,freespace
# Info for a specific partition:   fsutil volume diskfree c:
# Partition total/free can also be read with: wmic LOGICALDISK get name,Description,filesystem,size,freespace
# CPU
# CPU core/model info:             wmic cpu get name,addresswidth,processorid
# Current CPU load:                wmic cpu get LoadPercentage
# Processes
# Process list:                    wmic process get Caption,KernelModeTime,UserModeTime
# Whitespace cleanup pattern:      mystring.strip().replace(' ', '').replace('\n', '').replace('\t', '').replace('\r', '').strip()
# Network traffic statistics

# Read database connection settings from the config file
dbconf=ConfigParser.SafeConfigParser()
#with codecs.open('../conf/config.properties','r',encoding='utf-8') as f:
# dbconf.readfp(f)
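# Expected layout of ../conf/config.properties (sketch only; the section and key
# names below are the ones read in this script, the values are placeholders):
#   [db]
#   driver={sql server native client 10.0}
#   server=10.34.1.30
#   database=LogFeedback
#   uid=sa
#   pwd=<password>
#   [const_18]
#   location_18=<monitored location name>
#   describe_18=<host usage description>
#   countport_18=80
#   [ssh_18]
#   ssh_ip_18=10.34.1.23
#   ssh_port_18=22
#   ssh_username_18=ahwater
#   ssh_password_18=<password>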
dbconf.read('../conf/config.properties')

db_driver=dbconf.get('db','driver')
db_server=dbconf.get('db','server')
db_database=dbconf.get('db','database')
db_uid=dbconf.get('db','uid')
db_pwd=dbconf.get('db','pwd')

#print type(db_server)
#print type(db_database)
#print type(db_uid)
#print type(db_pwd)

#python connects to SQL Server 2008 R2
# Connection settings are read from the config file
"""
conn = pyodbc.connect(
driver='{sql server native client 10.0}';
server=%s;
database=%s;
uid=%s;
pwd=%s;
)
"""

#debug
#conn = pyodbc.connect('driver={sql server native client 10.0};server=%s;database=%s;uid=%s;pwd=%s;'%(db_server,db_database,db_uid,db_pwd))
#conn_info = ('Driver{MySQL51};Server=%s;Port=%s;Database=%s;User=%s; Password=%s;Option=3;'%(host, port, database, user,password))
#conn1_info=('Driver={sql server native client 10.0};server=%s;database=%s;uid=%s;pwd=%s;'%('10.34.1.30','LogFeedback','sa','ahswyc'))
#print conn1_info
conn_info=('driver=%s;server=%s;database=%s;uid=%s;pwd=%s;'%(db_driver,db_server,db_database,db_uid,db_pwd))
#print conn_info
conn=pyodbc.connect(conn_info)

# Hard-coded configuration, kept for reference:
#conn = pyodbc.connect('driver={sql server native client 10.0};server=10.34.1.30;database=LogFeedback;uid=sa;pwd=ahswyc;')
"""
conn = pyodbc.connect(
driver='{sql server native client 10.0}',
server='10.34.1.30',
database='LogFeedback',
uid='sa',
pwd='ahswyc'
)
""" cursor = conn.cursor() #参考调试,入库测试语句
#cursor.execute("insert into iisEstablishConnCounts(timestramp,iisHostIp,connCounts) values('2017/07/06:22:10','10.34.1.23',90)")

# SQL insert with column values supplied from variables
# Count of established connections on port 80:
#   netstat -na -p tcp| findstr 80 | find /C "ESTABLISH"
# Running a system command from Python and capturing the result in a variable
# (note this differs between Linux and Windows), e.g.:
#   cmd="netstat -na -p tcp| findstr 80 | find /C \"ESTABLISH\""
#   (status,output) = commands.getstatusoutput("%s" % cmd)

# Monitoring timestamp
dt= datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
#print(dt)
dateTime =dt
#print("debug1: ",dateTime)
print("current dateTime: ",dateTime)

# Local collection (below) is deprecated; this script mainly collects metrics from remote hosts.
# Enable temporarily for debugging:
#portCounts=os.popen("netstat -na -p tcp| findstr 80 | find /C \"ESTABLISH\"").read()
#print("debug2: ",portCounts)

# Manually configured data
location=dbconf.get('const_18','location_18')
describe=dbconf.get('const_18','describe_18')
countport=dbconf.get('const_18','countport_18')

#print(chardet.detect(location.encode('utf-8')))
#print type(location)
#op=('%s' % location).encode('gbk')
#print op
#localT=ur'省水文局'
#localT='水文局3tets1123'.decode('utf-8')
#localT=location.decode('utf-8').encode('gbk')
print location
localT=location
#localT="anhui shuiwen ju"
print("monitor location: ",localT)

# The SSH server runs on the monitored host, so the SSH IP and the monitored IP are the same.
ssh_ip=dbconf.get('ssh_18','ssh_ip_18')
mip=ssh_ip
print("monitor ip:",mip)

mport=countport
print("port: ",mport)

# paramiko logging
#logfiles=datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
#os.environ['logfiles'] = str(logfiles)
#paramiko.util.log_to_file(os.system(echo '../logs/$logfiles.txt'))
#paramiko.util.log_to_file("../logs/{logfiles}".txt)

# Fetch monitoring data from the remote host
# Establish the paramiko SSH connection to the monitored machine
#transport = paramiko.Transport(('192.168.11.181',22))
tail_ip=ssh_ip.split('.')[3]
logfiles=datetime.datetime.now().strftime('%Y-%m-%d_%H_%M_%S')
paramiko.util.log_to_file('../logs/%s-%s.txt'% (logfiles,tail_ip))

"""
transport = paramiko.Transport(('10.34.1.23', 22))
transport.connect(username='ahwater', password='Aa7788..')
ssh = paramiko.SSHClient()
ssh._transport = transport
"""
# Read the SSH connection settings from the config file
#ssh_ip=dbconf.get('ssh_18','ssh_ip_18')
ssh_port=int(dbconf.get('ssh_18','ssh_port_18'))
ssh_username=dbconf.get('ssh_18','ssh_username_18')
ssh_password=dbconf.get('ssh_18','ssh_password_18')
#print ssh_password
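# Note: the code below attaches the Transport to the SSHClient via the private
# attribute ssh._transport. A more conventional paramiko equivalent (sketch only,
# using the ssh_* values read from the config above) would be:
#   ssh = paramiko.SSHClient()
#   ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
#   ssh.connect(ssh_ip, port=ssh_port, username=ssh_username, password=ssh_password)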
transport = paramiko.Transport((ssh_ip, ssh_port))
transport.connect(username=ssh_username, password=ssh_password)
ssh = paramiko.SSHClient()
ssh._transport = transport

# Hostname
cmd01='hostname'
stdin, stdout, stderr = ssh.exec_command(cmd01)
#print(stdout.read())
data01=stdout.read().strip().replace(' ', '').replace('\t', '').replace('\r', '').strip()
print("monitor hostname: ",data01)

# Established-connection count for the port specified in the manual configuration
cmd02='netstat -na'
stdin, stdout, stderr = ssh.exec_command(cmd02)
out02=stdout.read()
# Count netstat lines that mention the configured port and are in the ESTABLISHED state
# (simple substring match on the port number)
data02=sum(1 for line in out02.splitlines() if mport in line and 'ESTABLISH' in line)
print("port est counts: ",data02)

# CPU utilization
cmd03='wmic cpu get LoadPercentage'
stdin, stdout, stderr = ssh.exec_command(cmd03)
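# Typical output of "wmic cpu get LoadPercentage" (illustrative; one value per
# processor instance, e.g. a dual-socket host would print two numbers):
#   LoadPercentage
#   12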
#da03=stdout.read().strip('LoadPercentage').replace('\n', '').replace('\t', '').replace('\r', '').replace(' ','').strip()
# Drop the header token and split into one load value per processor instance
da03=stdout.read().replace('LoadPercentage','').split()
#print(da03)
statis=0
counts=0
data03=0
for i in da03:
    # accumulate per-processor load and the number of processors
    counts = counts + 1
    statis = statis + int(i)
try:
    #print(counts)
    # average load across processors, expressed as a 0-1 ratio
    data03=round(float(statis)/counts/100,6)
    #data003="'" +data03 +"'"
    print("cpu use ratio: ",data03)
except ZeroDivisionError:
    pass

# Total memory / GB
"""
cmd04='wmic memorychip get capacity'
stdin,stdout,stderr = ssh.exec_command(cmd04)
da04=stdout.read().strip('Capacity').replace('\n','').replace('\t','').replace('\r','').replace(' ','').strip()
data04=float(da04)/1024/1024/1024
print("mem total Gb: ",data04)
"""
cmd04='wmic memorychip get capacity'
stdin,stdout,stderr = ssh.exec_command(cmd04)
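# Typical output of "wmic memorychip get capacity" (illustrative; one Capacity
# value in bytes per installed memory module):
#   Capacity
#   8589934592
#   8589934592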
# One Capacity value (bytes) per installed memory module: strip the header and sum them
d4_1=stdout.read().replace('Capacity','')
counts_4=0
for i in d4_1.split():
    counts_4 = counts_4 + int(i)
data04=float(counts_4)/1024/1024/1024
print("mem total Gb: ",data04)

# Free memory / GB
cmd05='wmic OS get FreePhysicalMemory'
stdin,stdout,stderr = ssh.exec_command(cmd05)
da05=stdout.read().strip('FreePhysicalMemory').replace('\n','').replace('\t','').replace('\r','').replace(' ','').strip()
data05=round(float(da05)/1024/1024,4)
print("mem free Gb: ",data05)

# Memory utilization
data06=round(float((data04 - data05)) / data04,4)
print("mem use ratio: ",data06)

# Disk info for the C: system drive
#cmd07='fsutil volume diskfree c:'
cmd07='wmic LOGICALDISK get FreeSpace,Size'
# C: drive totals
stdin,stdout,stderr = ssh.exec_command(cmd07)
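# Typical output of "wmic LOGICALDISK get FreeSpace,Size" (illustrative; one row
# per logical disk, values in bytes; the parsing below uses the first row):
#   FreeSpace     Size
#   53687091200   107374182400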
# Remove the FreeSpace/Size header tokens
d7_1=stdout.read().strip().replace('FreeSpace','').replace('Size','')
# Remove \r and \n
d7_2=d7_1.strip().replace('\r','').replace('\n','')
# Collapse repeated spaces into single spaces
d7_3=(' '.join(filter(lambda x: x, d7_2.split(' '))))
# Convert str -> list; assumes C: is the first logical disk listed
disk_data=d7_3.split(' ')

# C: partition total in GB (wmic reports bytes)
data07=round(float(disk_data[1])/1024/1024/1024,4)
print("C disk total Gb:",data07)
# C: partition free space in GB
data08=round(float(disk_data[0])/1024/1024/1024,4)
print("C disk free Gb:",data08)
# C: partition space utilization
data09=round((data07 - data08) / data07,4)
print("C disk space use ratio: ",data09)

# Network traffic statistics
cmd08='netstat -e'
stdin, stdout, stderr = ssh.exec_command(cmd08)
d8_1=stdout.read().strip().rstrip().lstrip().replace('\r','').replace('\n','')
d8_2=(' '.join(filter(lambda x: x, d8_1.split(' '))))
d8_3=d8_2.split(' ')

# Counters are reported in bytes
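# Because '\r' and '\n' were removed above (not replaced with spaces), the last
# number of each netstat -e row is fused with the label that starts the next row.
# On the Chinese-locale Windows hosts this script targets, the tokens are assumed
# to line up as:
#   d8_3[3] = received bytes, d8_3[4] = sent bytes (fused with the next label),
#   d8_3[5] = received unicast packets, d8_3[6] = sent unicast packets (fused with the next label)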
# Strip the non-digit (label) characters fused onto the sent-bytes token
net_data=re.sub(r'\D','',d8_3[4])
# Cumulative sent bytes, converted to GB
data10=round(float(net_data)/1024/1024/1024,4)
print("send traffic flow Gb: ",data10)
# Cumulative received bytes, converted to GB
#net_data2=re.sub(r'\D','',d8_3[3])
net_data2=d8_3[3]
data11=round(float(net_data2)/1024/1024/1024,4)
print("recv traffic flow Gb: ",data11)

# Cumulative sent packets (TCP/IP layer)
data12=int(re.sub(r'\D','',d8_3[6]))
#data12=round(float(net_data3)/1024/1024/1024,4)
print("send packets: ",data12)

# Cumulative received packets
data13=int(d8_3[5])
print("recv packets: ",data13)

# Insert the collected data into the database
# SQL template with column values supplied from variables
sql_debug = """insert into iisEstablishConnCounts(timestramp,iisHostIp,connCounts) values(
%(timestramp)s,
'10.34.1.23',
%(connCounts)s
)
"""
# id,timestramp,location,ip,hostname,port,port_est_counts,cpu_use_ratio
# mem_total,mem_free,mem_use_ratio
# disk_drive_c_total,disk_drive_c_free,disk_drive_c_use_ratio
# send_flow,recv_flow,send_packets,recv_packets

sql = """insert into ahwater_perf_monitor(timestramp,location,ip,hostname,host_use_description,port,port_est_counts,cpu_use_ratio,
mem_total,mem_free,mem_use_ratio,
disk_drive_c_total,disk_drive_c_free,disk_drive_c_use_ratio,
send_flow,recv_flow,send_packets,recv_packets) values(
%(timestramp)s,
%(location)s,
%(ip)s,
%(hostname)s,
%(host_use_description)s,
%(port)s,
%(port_est_counts)s,
%(cpu_use_ratio)s,
%(mem_total)s,
%(mem_free)s,
%(mem_use_ratio)s,
%(disk_drive_c_total)s,
%(disk_drive_c_free)s,
%(disk_drive_c_use_ratio)s,
%(send_flow)s,
%(recv_flow)s,
%(send_packets)s,
%(recv_packets)s
)
"""

#print(sql)
#cursor.execute(sql_debug % dict(timestramp = dateTime,connCounts = portCounts))
#print("\n")

"""
print dateTime
print localT
print mip
print data01
print mport
print data02
print data03
print data04
print data05
print data06
print data07
print data08
print data09
print data10
print data11
print data12
print data13
"""
#print describe

cursor.execute(sql % dict(
    timestramp="'" + dateTime + "'",
    location="'" + localT + "'",
    ip="'" + mip + "'",
    hostname="'" + data01 + "'",
    host_use_description="'" + describe + "'",
    port=mport,
    port_est_counts=data02,
    cpu_use_ratio=data03,
    mem_total=data04,
    mem_free=data05,
    mem_use_ratio=data06,
    disk_drive_c_total=data07,
    disk_drive_c_free=data08,
    disk_drive_c_use_ratio=data09,
    send_flow=data10,
    recv_flow=data11,
    send_packets=data12,
    recv_packets=data13
))

"""
cursor.execute(sql % dict(
timestramp=dateTime,
location=localT,
ip=mip,
hostname=data01,
port=mport,
port_est_counts="'" +str(data02) + "'",
cpu_use_ratio="," +str(data03) + "'",
mem_total="'"+str(data04)+"'",
mem_free="'"+str(data05)+"'",
mem_use_ratio="'"+str(data06)+"'",
disk_drive_c_total="'"+str(data07)+"'",
disk_drive_c_free="'" +str(data08)+"'",
disk_drive_c_use_ratio="'"+str(data09)+"'",
send_flow="'"+str(data10)+"'",
recv_flow="'"+str(data11)+"'",
send_packets=data12,
recv_packets=data13
))
"""

# Close the pyodbc connection
conn.commit()
conn.close()
# Close the paramiko SSH session
transport.close()
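
# A safer alternative to building the INSERT with % string interpolation and manual
# quoting (sketch only, not executed here): pyodbc supports parameterized queries
# with '?' placeholders, which handle quoting/escaping automatically. Table and
# column names are the same as in the template above.
#
# sql_param = ("insert into ahwater_perf_monitor(timestramp,location,ip,hostname,"
#              "host_use_description,port,port_est_counts,cpu_use_ratio,"
#              "mem_total,mem_free,mem_use_ratio,"
#              "disk_drive_c_total,disk_drive_c_free,disk_drive_c_use_ratio,"
#              "send_flow,recv_flow,send_packets,recv_packets) "
#              "values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
# cursor.execute(sql_param, (dateTime, localT, mip, data01, describe, mport, data02,
#                            data03, data04, data05, data06, data07, data08, data09,
#                            data10, data11, data12, data13))
# conn.commit()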
