統計服務端整體資源消耗的工具很多(如成熟的 nmon 等),
但沒有找到能統計特定進程實際資源消耗的現成工具,因此用 Python 簡單實現了一個。
#encoding:utf-8
import os,time
processlist = ['nginx','docker','java'] # process names to monitor, e.g. CPU/mem usage of docker and nginx
intervaltime = 5 # sampling interval in seconds
logfile = './log/TDE_so.csv' # path and name of the CSV log file (directory must exist)
def oscmd(cmd):
    """Run *cmd* through the shell and return its stdout as a list of lines.

    The original left the pipe returned by os.popen unclosed (a file-handle
    leak inside the sampling loop); using it as a context manager guarantees
    the pipe is released after reading.
    """
    with os.popen(cmd) as pipe:
        return pipe.readlines()
# Column header of the CSV log (truncates any previous log file).
initdata = 'pid,cpu%,mem%,cmd,time'
with open(logfile, 'w') as log:
    log.write(initdata + '\n')
# Sampling loop: runs forever; just kill the script when monitoring is done.
while True:
    for proc_name in processlist:  # renamed: original reused `i` for both loops
        # Collect the PIDs of every process whose command line matches proc_name.
        cmd = "ps -ef | grep " + proc_name + "|grep -v grep|awk -F ' ' '{print $2}'"
        pids = oscmd(cmd)
        for pid in pids:
            pid = pid.strip()
            # One-shot batch-mode top for this PID; drop the column-header row.
            cmd2 = "top -p " + pid + " -bn1|grep -A1 " + pid + "| grep -v PID"
            data = oscmd(cmd2)
            if not data:
                # Process exited between ps and top — skip instead of IndexError.
                continue
            fields = data[0].split()
            if len(fields) < 12:
                continue
            # Standard procps top columns: 0 PID ... 8 %CPU, 9 %MEM, 11 COMMAND.
            msg_pid = fields[0]
            msg_cpu = fields[8]
            # Bugfix: was fields[5] (RES, resident KB) — header promises mem%.
            # NOTE(review): assumes default top column layout; confirm on target OS.
            msg_mem = fields[9]
            msg_cmd = fields[11]
            nowtime = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())
            # One CSV row: pid,cpu%,mem%,cmd,time
            wdata = msg_pid + ',' + msg_cpu + ',' + msg_mem + ',' + msg_cmd + ',' + nowtime
            with open(logfile, 'a+') as f:
                f.write(wdata)
                f.write('\n')
    time.sleep(intervaltime)
最終生成 CSV 文件,然後用 Excel 打開進行數據統計,效果如下圖所示。