# 需求:有一個字典,有三千多個key,需要對其進行字典切割,切割後存入列表(list)中。
# -*- coding: UTF-8 -*-
import multiprocessing # 加載多進程庫
import itertools #加載字典切割庫
import time
from requests.adapters import HTTPAdapter
import requests
# Build the url -> dbnum mapping from the config file.
# Each input line is "|"-separated; field 0 is a comma-separated list of
# server ids and field 2 is the dbnum for all of them — TODO confirm the
# meaning of the other fields against the file format.
domains = ".x.xxx.com/data/newworldkill"
url_dict = {}
with open("/root/carl11.txt", "r") as f:
    # Iterate the file lazily instead of readlines(); avoid shadowing
    # the builtin `list` with the split result.
    for line in f:
        fields = line.strip().split("|")
        dbnum = fields[2]
        for server_id in fields[0].split(","):
            url = "http://s" + server_id + domains
            url_dict[url] = dbnum
# 分割字典
# 分割字典
def splitDict(d, parts=100):
    """Split dict *d* into *parts* sub-dicts of near-equal size.

    Unlike the previous version (which used len(d)//parts per chunk and
    silently dropped the remaining len(d) % parts items — and dropped
    EVERYTHING when len(d) < parts), this keeps every item: the first
    len(d) % parts chunks get one extra entry each.

    Returns a list of *parts* dicts whose union equals *d*.
    """
    items = list(d.items())
    size, extra = divmod(len(items), parts)
    chunks = []
    start = 0
    for idx in range(parts):
        # First `extra` chunks absorb the remainder, one item each.
        end = start + size + (1 if idx < extra else 0)
        chunks.append(dict(items[start:end]))
        start = end
    return chunks
# 運行主體函數
# 運行主體函數
def run(url_dicts):
    """Probe every url in *url_dicts* and print its health.

    url_dicts maps url -> dbnum. A 200 response prints "url|dbnum";
    any other status, or a failed request, prints the dbnum as abnormal.
    """
    headers = {
        'Connection': 'close',
    }
    # NOTE: the original unpacked this as `for value, key in ...`,
    # i.e. the names were swapped; `url` is the dict key, `dbnum` the value.
    for url, dbnum in url_dicts.items():
        try:
            # timeout keeps one dead host from hanging this worker forever
            status_code = requests.get(url, headers=headers, timeout=10).status_code
        except requests.RequestException:
            # A connection failure previously crashed the whole process;
            # report the zone as abnormal and keep going.
            print(str(dbnum) + u"區不正常")
            continue
        if status_code == 200:
            print(url + "|" + str(dbnum))
        else:
            print(str(dbnum) + u"區不正常")
if __name__ == '__main__':
    # 多進程運行程序
    # Split once (the result is loop-invariant; the original recomputed
    # the full split on every one of the 100 iterations), then launch
    # one worker process per chunk and wait for them all to finish.
    # The stray ``` fence that ended the original file (a SyntaxError)
    # is removed.
    chunks = splitDict(url_dict)
    workers = []
    for chunk in chunks:
        p = multiprocessing.Process(target=run, args=(chunk,))
        p.start()
        workers.append(p)
    for p in workers:
        p.join()