# multiprocessing Pool.map: python多進程提取處理大量文本的關鍵詞
# -*- coding: utf-8 -*-
import codecs
import os
import sys
import time

import multiprocessing as mp
from multiprocessing import Pool

import jieba.analyse

# Python 2 only: force UTF-8 as the implicit str<->unicode codec.
# reload()/setdefaultencoding do not exist on Python 3, so guard them;
# on Python 3 strings are unicode already and no action is needed.
if sys.version_info[0] == 2:
    reload(sys)  # noqa: F821 - builtin on Python 2
    sys.setdefaultencoding("utf-8")

# Global jieba configuration: stop words are shared by every extraction call.
# NOTE(review): assumes "yy_stop_words.txt" lives in the working directory.
jieba.analyse.set_stop_words("yy_stop_words.txt")
def extract_keyword(input_string):
    """Return the top-100 keywords of *input_string*, ranked by TF-IDF.

    Thin wrapper over jieba.analyse.extract_tags; the stop-word list is
    configured globally at import time.
    """
    return jieba.analyse.extract_tags(input_string, topK=100)
def parallel_extract_keyword(input_string):
    """Worker used with Pool.map: extract top-100 TF-IDF keywords of one line.

    Must stay a module-level function so multiprocessing can pickle it.
    Delegates to extract_keyword instead of duplicating its body, so the
    serial and parallel paths cannot drift apart.
    """
    return extract_keyword(input_string)
if __name__ == "__main__":
    # Usage: python this_script.py <input_text_file>
    data_file = sys.argv[1]
    # Explicit encoding: codecs.open without one returns raw bytes on Py2.
    with codecs.open(data_file, "r", encoding="utf-8") as f:
        lines = f.readlines()
    # splitext instead of split('.')[0]: survives dots in directory names.
    out_put = os.path.splitext(data_file)[0] + "_tags.txt"

    # Serial baseline for timing comparison.
    t0 = time.time()
    for line in lines:
        parallel_extract_keyword(line)
    print("串行處理花費時間{t}".format(t=time.time() - t0))

    # Use ~70% of the cores, but never fewer than 1 (int() can floor to 0).
    pool = Pool(processes=max(1, int(mp.cpu_count() * 0.7)))
    t1 = time.time()
    res = pool.map(parallel_extract_keyword, lines)
    pool.close()
    pool.join()
    print("並行處理花費時間{t}s".format(t=time.time() - t1))

    # Bug fix: out_put was computed but the results were never written.
    # Persist one whitespace-joined tag list per input line.
    with codecs.open(out_put, "w", encoding="utf-8") as fo:
        for tags in res:
            fo.write(" ".join(tags) + "\n")
# (removed: blog comment-section boilerplate accidentally scraped into the source)