top - 16:14:35 up 2 days,  3:04,  7 users,  load average: 2.22, 1.84, 1.77
Tasks: 512 total, 2 running, 509 sleeping, 0 stopped, 1 zombie
%Cpu0 : 5.0 us, 1.0 sy, 17.0 ni, 77.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu1 : 5.9 us, 1.0 sy, 1.0 ni, 92.2 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu2 : 7.0 us, 0.0 sy, 0.0 ni, 93.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu3 : 4.0 us, 0.0 sy, 3.0 ni, 93.1 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu4 : 5.8 us, 1.0 sy, 1.9 ni, 91.3 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu5 : 22.0 us, 1.0 sy, 0.0 ni, 77.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu6 : 4.0 us, 0.0 sy, 0.0 ni, 96.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu7 : 4.0 us, 0.0 sy, 0.0 ni, 96.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu8 : 4.0 us, 0.0 sy, 0.0 ni, 96.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu9 : 18.6 us, 1.0 sy, 1.0 ni, 79.4 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu10 : 3.9 us, 0.0 sy, 0.0 ni, 96.1 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
%Cpu11 : 3.9 us, 0.0 sy, 0.0 ni, 96.1 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
MiB Mem: 32067.54+total, 7194.383 used, 24873.16+free, 750.664 buffers
MiB Swap: 15258.99+total, 0.000 used, 15258.99+free. 1660.316 cached Mem

PID VIRT RES SHR S %CPU %MEM COMMAND nTH P SWAP CODE DATA nMaj nDRT USED
148266 1786.7m 262.3m 65.6m S 46.3 0.8 compiz 17 3 0.0m 0.0m 995.3m 266 0 262.3m
145203 228.5m 70.3m 40.8m S 25.6 0.2 Xvnc 1 2 0.0m 4.3m 29.7m 0 0 70.3m
9024 32.1m 4.1m 3.3m S 21.6 0.0 fiberlamp 1 9 0.0m 0.0m 1.0m 0 0 4.1m
3535 214.9m 68.5m 45.2m S 20.7 0.2 Xvnc 1 4 0.0m 4.3m 23.6m 25 0 68.5m
7905 31.4m 3.4m 3.1m S 3.9 0.0 fuzzyflakes 1 5 0.0m 0.0m 0.4m 0 0 3.4m
145581 20.5m 3.0m 2.3m R 2.0 0.0 top 1 1 0.0m 0.1m 1.3m 1 0 3.0m
1454 12.947g 1.416g 31.9m S 1.0 4.5 java 51 0 0.0m 0.0m 12.809g 168 0 1.416g
3556 751.9m 66.7m 51.8m S 1.0 0.2 xfdesktop 3 11 0.0m 0.3m 300.4m 277 0 66.7m
8956 20.5m 2.9m 2.2m R 1.0 0.0 top 1 0 0.0m 0.1m 1.3m 0 0 2.9m
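
A snapshot like the one above can be produced by running top in batch mode. The following is only a minimal capture sketch, and it assumes procps-ng top plus a toprc saved beforehand (extra columns such as nTH, P, SWAP, CODE, DATA, nMaj, nDRT and USED enabled with the 'f' field manager, memory scaling switched with 'E'/'e', configuration written with 'W'); it simply redirects one refresh into the file the parser below reads:

# -*- coding: utf-8 -*-
# Sketch: capture a single top refresh into top_cpu.txt.
# Assumes the column layout shown above was already saved in the user's toprc.
import subprocess

with open('top_cpu.txt', 'w') as f:
    # -b: batch (plain-text) output, -n 1: exit after one refresh
    subprocess.check_call(['top', '-b', '-n', '1'], stdout=f)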

The parsing code is as follows. It reads top_cpu.txt line by line, creates the SQLite tables load_info, task_info, cpu_info, mem_info, swap_info and process_info on first use, and inserts one row per parsed line into top_info.db:

# -*- coding: utf-8 -*-

import sqlite3
import os
import time


def create_load_info_table(cursor):
    create_sql = '''
        CREATE TABLE IF NOT EXISTS load_info(
            min1_load REAL,
            min5_load REAL,
            min15_load REAL,
            record_time TEXT,
            time_stamp TEXT
        )
    '''
    cursor.execute(create_sql)


def create_task_info_table(cursor):
    create_sql = '''
        CREATE TABLE IF NOT EXISTS task_info(
            total INTEGER,
            running INTEGER,
            sleeping INTEGER,
            stopped INTEGER,
            zombie INTEGER,
            record_time TEXT,
            time_stamp TEXT
        )
    '''
    cursor.execute(create_sql)


def create_cpu_info_table(cursor):
    create_sql = '''
        CREATE TABLE IF NOT EXISTS cpu_info(
            cpu_name TEXT,
            us REAL,
            sy REAL,
            ni REAL,
            id REAL,
            wa REAL,
            hi REAL,
            si REAL,
            st REAL,
            record_time TEXT,
            time_stamp TEXT
        )
    '''
    cursor.execute(create_sql)


def create_mem_info_table(cursor):
    create_sql = '''
        CREATE TABLE IF NOT EXISTS mem_info(
            total REAL,
            used REAL,
            free REAL,
            buffers REAL,
            record_time TEXT,
            time_stamp TEXT
        )
    '''
    cursor.execute(create_sql)


def create_swap_info_table(cursor):
    create_sql = '''
        CREATE TABLE IF NOT EXISTS swap_info(
            total REAL,
            used REAL,
            free REAL,
            cached REAL,
            record_time TEXT,
            time_stamp TEXT
        )
    '''
    cursor.execute(create_sql)


def create_process_info_table(cursor):
    create_sql = '''
        CREATE TABLE IF NOT EXISTS process_info(
            PID INTEGER,
            VIRT REAL,
            RES REAL,
            SHR REAL,
            S TEXT,
            CPU REAL,
            MEM REAL,
            COMMAND TEXT,
            nTH INTEGER,
            P INTEGER,
            SWAP REAL,
            CODE REAL,
            DATA REAL,
            nMaj INTEGER,
            nDRT INTEGER,
            USED REAL,
            record_time TEXT,
            time_stamp TEXT
        )
    '''
    cursor.execute(create_sql)


cur_dir = os.getcwd()
db_name = 'top_info.db'
top_log_name = 'top_cpu.txt'

conn = sqlite3.connect(db_name)
cursor = conn.cursor()

get_all_table = "SELECT tbl_name FROM sqlite_master WHERE type = 'table'"
cursor.execute(get_all_table)
all_table_list = cursor.fetchall()
all_table_name_list = []
for table_name in all_table_list:
    all_table_name_list.append(table_name[0])
    delete_sql = "DELETE FROM %s" % (table_name[0])  # drop the records from the previous run
    cursor.execute(delete_sql)
conn.commit()             # commit the deletes first: VACUUM cannot run inside an open transaction
cursor.execute("VACUUM")  # VACUUM reclaims the unused space

topfile = open(top_log_name, 'r')
try:
    lines = topfile.readlines()
    cur_time_stamp = ''
    cur_record_time = ''
    for line in lines:
        line = line.strip()
        cur_time_stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
        if len(line) == 0:
            continue
        elif line[:3] == 'top':
            if 'load_info' not in all_table_name_list:
                create_load_info_table(cursor)
            # ['top - 08:14:17 up 2 days', ' 15:36', ' 15 users', ' load average: 5.46', ' 5.29', ' 5.18']
            load_list = line.split(',')
            # ['top', '-', '08:14:17', 'up', '2', 'days']
            cur_time_list = load_list[0].split()
            cur_time = cur_time_list[2]
            cur_record_time = cur_time
            # ['load average', ' 5.46']
            min1_load_list = load_list[3].strip().split(':')
            min1_load = float(min1_load_list[1])
            min5_load = float(load_list[4])
            min15_load = float(load_list[5])
            insert_sql = "INSERT INTO load_info VALUES (%0.2f, %0.2f, %0.2f, '%s', '%s')" % (
                min1_load, min5_load, min15_load, cur_record_time, cur_time_stamp)
            cursor.execute(insert_sql)
            #conn.commit()
        elif line[:5] == 'Tasks':  # the line reads 'Threads:' instead when thread mode is on
            if 'task_info' not in all_table_name_list:
                create_task_info_table(cursor)
            # 'Tasks: 898 total, 5 running, 890 sleeping, 0 stopped, 3 zombie'
            task_list = line.split(',')
            # ['Tasks: 898 total', ' 5 running', ' 890 sleeping', ' 0 stopped', ' 3 zombie']
            sum_task_count = int(task_list[0].strip().split(':')[1].split()[0])
            running_task_count = int(task_list[1].strip().split()[0])
            sleeping_task_count = int(task_list[2].strip().split()[0])
            stopped_task_count = int(task_list[3].strip().split()[0])
            zombie_task_count = int(task_list[4].strip().split()[0])
            insert_sql = "INSERT INTO task_info VALUES (%d, %d, %d, %d, %d, '%s', '%s')" % (
                sum_task_count, running_task_count, sleeping_task_count,
                stopped_task_count, zombie_task_count, cur_record_time, cur_time_stamp)
            cursor.execute(insert_sql)
            #conn.commit()
        elif line[:4] == '%Cpu':  # one line per CPU (e.g. %Cpu0) when the per-CPU view is toggled with '1'
            if 'cpu_info' not in all_table_name_list:
                create_cpu_info_table(cursor)
            cpu_name = line[:line.index(':')].strip()
            # '%Cpu0 : 19.6 us, 2.0 sy, 5.8 ni, 72.6 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st'
            cpu_list = line.split(':')[1].strip().split(',')
            # ['19.6 us', ' 2.0 sy', ' 5.8 ni', ' 72.6 id', ' 0.0 wa', ' 0.0 hi', ' 0.0 si', ' 0.0 st']
            us_percent = float(cpu_list[0].strip().split()[0])  # time running un-niced user processes
            sy_percent = float(cpu_list[1].strip().split()[0])  # time running kernel processes
            ni_percent = float(cpu_list[2].strip().split()[0])  # time running niced user processes
            id_percent = float(cpu_list[3].strip().split()[0])  # time spent in the kernel idle handler
            wa_percent = float(cpu_list[4].strip().split()[0])  # time waiting for I/O completion
            hi_percent = float(cpu_list[5].strip().split()[0])  # time spent servicing hardware interrupts
            si_percent = float(cpu_list[6].strip().split()[0])  # time spent servicing software interrupts
            st_percent = float(cpu_list[7].strip().split()[0])  # time stolen from this VM by the hypervisor
            insert_sql = "INSERT INTO cpu_info VALUES ('%s', %0.2f, %0.2f, %0.2f, %0.2f, %0.2f, %0.2f, %0.2f, %0.2f, '%s', '%s')" % (
                cpu_name, us_percent, sy_percent, ni_percent, id_percent,
                wa_percent, hi_percent, si_percent, st_percent, cur_record_time, cur_time_stamp)
            cursor.execute(insert_sql)
            #conn.commit()
        elif line[:7] == 'KiB Mem':  # KiB scale; the summary-area memory unit is chosen with the interactive 'E' command
            if 'mem_info' not in all_table_name_list:
                create_mem_info_table(cursor)
            # 'KiB Mem: 32837164 total, 10604512 used, 22232652 free, 1117824 buffers'
            mem_list = line.split(':')[1].strip().split(',')
            # ['32837164 total', ' 10604512 used', ' 22232652 free', ' 1117824 buffers']
            total_mem = float(mem_list[0].strip().split()[0]) / 1024  # KiB -> MiB
            used_mem = float(mem_list[1].strip().split()[0]) / 1024
            free_mem = float(mem_list[2].strip().split()[0]) / 1024
            buffer_mem = float(mem_list[3].strip().split()[0]) / 1024
            insert_sql = "INSERT INTO mem_info VALUES (%0.2f, %0.2f, %0.2f, %0.2f, '%s', '%s')" % (
                total_mem, used_mem, free_mem, buffer_mem, cur_record_time, cur_time_stamp)
            cursor.execute(insert_sql)
            #conn.commit()
        elif line[:7] == 'MiB Mem':  # already in MiB; truncated values carry a trailing '+'
            if 'mem_info' not in all_table_name_list:
                create_mem_info_table(cursor)
            # MiB Mem: 32067.54+total, 5090.746 used, 26976.79+free, 624.168 buffers
            mem_list = line.split(':')[1].strip().split(',')
            total_mem = 0.0
            if '+' in mem_list[0]:
                total_mem = float(mem_list[0].strip().split('+')[0])
            else:
                total_mem = float(mem_list[0].strip().split()[0])
            used_mem = 0.0
            if '+' in mem_list[1]:
                used_mem = float(mem_list[1].strip().split('+')[0])
            else:
                used_mem = float(mem_list[1].strip().split()[0])
            free_mem = 0.0
            if '+' in mem_list[2]:
                free_mem = float(mem_list[2].strip().split('+')[0])
            else:
                free_mem = float(mem_list[2].strip().split()[0])
            buffer_mem = 0.0
            if '+' in mem_list[3]:
                buffer_mem = float(mem_list[3].strip().split('+')[0])
            else:
                buffer_mem = float(mem_list[3].strip().split()[0])
            insert_sql = "INSERT INTO mem_info VALUES (%0.2f, %0.2f, %0.2f, %0.2f, '%s', '%s')" % (
                total_mem, used_mem, free_mem, buffer_mem, cur_record_time, cur_time_stamp)
            cursor.execute(insert_sql)
            #conn.commit()
        elif line[:8] == 'KiB Swap':  # KiB scale, again set with the interactive 'E' command
            if 'swap_info' not in all_table_name_list:
                create_swap_info_table(cursor)
            # 'KiB Swap: 15625212 total, 0 used, 15625212 free. 3900452 cached Mem'
            swap_list = line.split(':')[1].strip().split(',')
            # ['15625212 total', ' 0 used', ' 15625212 free. 3900452 cached Mem']
            total_swap = float(swap_list[0].strip().split()[0]) / 1024  # KiB -> MiB
            used_swap = float(swap_list[1].strip().split()[0]) / 1024
            free_cache_list = swap_list[2].strip().split('.')
            free_swap = float(free_cache_list[0].strip().split()[0]) / 1024
            cache_swap = float(free_cache_list[1].strip().split()[0]) / 1024
            insert_sql = "INSERT INTO swap_info VALUES (%0.2f, %0.2f, %0.2f, %0.2f, '%s', '%s')" % (
                total_swap, used_swap, free_swap, cache_swap, cur_record_time, cur_time_stamp)
            cursor.execute(insert_sql)
            #conn.commit()
        elif line[:8] == 'MiB Swap':
            if 'swap_info' not in all_table_name_list:
                create_swap_info_table(cursor)
            # MiB Swap: 15258.99+total, 0.000 used, 15258.99+free. 1475.379 cached Mem
            swap_list = line.split(':')[1].strip().split(',')
            total_swap = 0.0
            if '+' in swap_list[0]:
                total_swap = float(swap_list[0].strip().split('+')[0])
            else:
                total_swap = float(swap_list[0].strip().split()[0])
            used_swap = 0.0
            if '+' in swap_list[1]:
                used_swap = float(swap_list[1].strip().split('+')[0])
            else:
                used_swap = float(swap_list[1].strip().split()[0])
            # split the 'free. cached' pair on '. ' so the decimal point inside a MiB value is not cut off
            free_cache_list = swap_list[2].strip().split('. ')
            free_swap = 0.0
            if '+' in free_cache_list[0]:
                free_swap = float(free_cache_list[0].strip().split('+')[0])
            else:
                free_swap = float(free_cache_list[0].strip().split()[0])
            cache_swap = 0.0
            if '+' in free_cache_list[1]:
                cache_swap = float(free_cache_list[1].strip().split('+')[0])
            else:
                cache_swap = float(free_cache_list[1].strip().split()[0])
            insert_sql = "INSERT INTO swap_info VALUES (%0.2f, %0.2f, %0.2f, %0.2f, '%s', '%s')" % (
                total_swap, used_swap, free_swap, cache_swap, cur_record_time, cur_time_stamp)
            cursor.execute(insert_sql)
            #conn.commit()
        elif line[:3] == 'PID':  # column header of the task area
            continue
        else:
            if 'process_info' not in all_table_name_list:
                create_process_info_table(cursor)
            # PID VIRT RES SHR S %CPU %MEM COMMAND nTH P SWAP CODE DATA nMaj nDRT USED
            # '157271 459.9m 256.8m 13.6m R 92.3 0.8 bundle 2 0 0.0m 0.0m 251.6m 0 0 256.8m'
            process_list = line.split()
            PID = int(process_list[0])  # Process Id
            # the task-area memory unit is chosen with the interactive 'e' (lowercase) command
            VIRT = 0.0
            if 'm' in process_list[1]:
                VIRT = float(process_list[1][:-1])
            elif 'g' in process_list[1]:
                VIRT = float(process_list[1][:-1]) * 1024
            else:
                VIRT = float(process_list[1]) / 1024
            # Virtual Memory Size
            # The total amount of virtual memory used by the task. It includes all code, data and shared
            # libraries plus pages that have been swapped out and pages that have been mapped but not used.
            RES = 0.0
            if 'm' in process_list[2]:
                RES = float(process_list[2][:-1])
            elif 'g' in process_list[2]:
                RES = float(process_list[2][:-1]) * 1024
            else:
                RES = float(process_list[2]) / 1024
            # Resident Memory Size
            # A subset of the virtual address space (VIRT) representing the non-swapped physical memory
            # a task is currently using.
            SHR = 0.0
            if 'm' in process_list[3]:
                SHR = float(process_list[3][:-1])
            elif 'g' in process_list[3]:
                SHR = float(process_list[3][:-1]) * 1024
            else:
                SHR = float(process_list[3]) / 1024
            # Shared Memory Size
            # A subset of resident memory (RES) that may be used by other processes.
            process_status = process_list[4]
            # D = uninterruptible sleep
            # R = running
            # S = sleeping
            # T = stopped by job control signal
            # t = stopped by debugger during trace
            # Z = zombie
            cpu_usage = float(process_list[5])  # whether %CPU is relative to one CPU or to the sum of all CPUs is controlled by the interactive 'I' toggle
            mem_usage = float(process_list[6])
            command_name = process_list[7]
            thread_count = int(process_list[8])
            use_cpu_index = int(process_list[9])
            SWAP = 0.0
            if 'm' in process_list[10]:
                SWAP = float(process_list[10][:-1])
            elif 'g' in process_list[10]:
                SWAP = float(process_list[10][:-1]) * 1024
            else:
                SWAP = float(process_list[10]) / 1024
            # Swapped Size
            # The formerly resident portion of a task's address space written to the swap file when
            # physical memory becomes overcommitted.
            CODE = 0.0
            if 'm' in process_list[11]:
                CODE = float(process_list[11][:-1])
            elif 'g' in process_list[11]:
                CODE = float(process_list[11][:-1]) * 1024
            else:
                CODE = float(process_list[11]) / 1024
            # Code Size
            # The amount of physical memory currently devoted to executable code, also known as the
            # Text Resident Set or TRS.
            DATA = 0.0
            if 'm' in process_list[12]:
                DATA = float(process_list[12][:-1])
            elif 'g' in process_list[12]:
                DATA = float(process_list[12][:-1]) * 1024
            else:
                DATA = float(process_list[12]) / 1024
            # Data + Stack Size
            # The amount of private memory reserved by a process, also known as the Data Resident Set
            # or DRS. Such memory may not yet be mapped to physical memory (RES) but will always be
            # included in the virtual memory (VIRT) amount.
            nMaj = int(process_list[13])
            # Major Page Fault Count
            # The number of major page faults that have occurred for a task. A page fault occurs when a
            # process attempts to read from or write to a virtual page that is not currently present in
            # its address space. A major page fault is when auxiliary storage access is involved in
            # making that page available.
            nDRT = int(process_list[14])
            # Dirty Pages Count
            # The number of pages that have been modified since they were last written to auxiliary
            # storage. Dirty pages must be written to auxiliary storage before the corresponding
            # physical memory location can be used for some other virtual page.
            USED = 0.0
            if 'm' in process_list[15]:
                USED = float(process_list[15][:-1])
            elif 'g' in process_list[15]:
                USED = float(process_list[15][:-1]) * 1024
            else:
                USED = float(process_list[15]) / 1024
            # Memory in Use
            # The non-swapped physical memory a task is using (RES) plus the swapped out portion of its
            # address space (SWAP).
            # 8943 397.8m 218.4m 12.7m R 95.9 0.7 bundle 2 9 0.0m 0.0m 216.3m 0 0 218.4m
            insert_sql = "INSERT INTO process_info VALUES (%d, %0.2f, %0.2f, %0.2f, '%s', %0.2f, %0.2f, '%s', %d, %d, %0.2f, %0.2f, %0.2f, %d, %d, %0.2f, '%s', '%s')" % (
                PID, VIRT, RES, SHR, process_status, cpu_usage, mem_usage, command_name,
                thread_count, use_cpu_index, SWAP, CODE, DATA, nMaj, nDRT, USED,
                cur_record_time, cur_time_stamp)
            cursor.execute(insert_sql)
            #conn.commit()
finally:
    topfile.close()
    cursor.close()
    conn.commit()
    conn.close()
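
Once the script has finished, the samples can be inspected with ordinary SQL. Below is a minimal follow-up sketch (assuming top_info.db in the current directory was just populated by the script above) that lists the five process samples with the highest %CPU:

import sqlite3

conn = sqlite3.connect('top_info.db')
cursor = conn.cursor()
# Column names follow the process_info schema created above.
cursor.execute("SELECT PID, COMMAND, CPU, MEM, record_time FROM process_info "
               "ORDER BY CPU DESC LIMIT 5")
for row in cursor.fetchall():
    print(row)
cursor.close()
conn.close()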
