# 统计服务端整体资源消耗的工具很多,如成熟的nmon等,
# 但是统计某些进程实际耗资源的没有找到相关工具,用python简单实现了下
#encoding:utf-8
import os,time
processlist = ['nginx','docker','java'] #process names to monitor; cpu%/mem% is sampled for every matching PID
intervaltime = 5 #seconds to sleep between sampling rounds
logfile = './log/TDE_so.csv' #path of the output CSV log (directory must exist)
def oscmd(cmd):
    """Run *cmd* through the shell and return its stdout as a list of lines.

    The popen handle is closed via the context manager; the original left it
    open, leaking one file descriptor per call inside the endless sampling
    loop.  Each returned line keeps its trailing newline, matching
    ``file.readlines()``.
    """
    with os.popen(cmd) as f:
        return f.readlines()
initdata = 'pid,cpu%,mem%,cmd,time'  # CSV header row
# Create the log directory up front; the original crashed at startup if ./log was missing.
os.makedirs(os.path.dirname(logfile) or '.', exist_ok=True)
with open(logfile, 'w') as f:
    f.write(initdata)
    f.write('\n')

while True:  # endless sampling loop — stop the script when monitoring is no longer needed
    for proc in processlist:  # renamed from `i`: the original inner loop shadowed this variable
        # Collect every PID whose `ps -ef` line mentions the process name.
        cmd = "ps -ef | grep "+proc+"|grep -v grep|awk -F ' ' '{print $2}'"
        pids = oscmd(cmd)
        for pid in pids:
            pid = pid.strip()
            # One batch iteration of top for this PID; `grep -v PID` drops the column header.
            cmd2 = "top -p "+pid+" -bn1|grep -A1 "+pid+"| grep -v PID"
            data = oscmd(cmd2)
            if not data:
                continue  # process exited between ps and top; original raised IndexError here
            fields = data[0].split()
            # Standard `top -b` columns:
            #   0:PID 1:USER 2:PR 3:NI 4:VIRT 5:RES 6:SHR 7:S 8:%CPU 9:%MEM 10:TIME+ 11:COMMAND
            msg_pid = fields[0]
            msg_cpu = fields[8]   # %CPU
            msg_mem = fields[9]   # %MEM — original read fields[5] (RES, resident KiB), not a percentage
            msg_cmd = fields[11]
            nowtime = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())
            # Row format matches the header: pid,cpu%,mem%,cmd,time
            wdata = msg_pid+','+msg_cpu+','+msg_mem+','+msg_cmd+','+nowtime
            # (the original also built an unused YYYYMMDD timestamp here; removed)
            with open(logfile, 'a+') as f:
                f.write(wdata)
                f.write('\n')
    time.sleep(intervaltime)
# 最终生成csv文件,然后使用excel打开做数据统计,如图