Python — 知乎日報爬蟲(代碼運行正常)

import requests
import json
import threading
import time
import os
import re
 
# Shared mutable state used by the worker threads below.
paper_id = []  # story ids collected by get_papers(), consumed by thread_control()
threads = 0  # count of currently-running download threads (throttle signal)
glock = threading.Lock()  # guards increments/decrements of `threads`
# Impersonates the Zhihu Daily Android client so the API serves responses.
header = {
    'User-Agent':
    'DailyApi/4 (Linux; Android 5.1.1; xiaomi 6 Build/xiaomi/xiaomi 6/x86/LMY48Z/zh_CN) Google-HTTP-Java-Client/1.22.0 (gzip) Google-HTTP-Java-Client/1.22.0 (gzip)'
}
path = ''  # output directory name; assigned by get_papers() based on section id
 
 
def get_single_paper(paper_id):
    """Download one Zhihu Daily story and save its HTML body to disk.

    paper_id -- story id as a string; used in both the API URL and the
                output file name ``./<path>/<paper_id>-<title>.html``.

    Side effects: increments the global ``threads`` counter on entry and
    decrements it on exit, so thread_control() can throttle concurrency.
    """
    global threads
    with glock:
        threads += 1
    try:
        url = 'https://news-at.zhihu.com/api/4/story/' + paper_id
        res = requests.get(url, headers=header)
        resJson = json.loads(res.content.decode('utf-8'))
        try:
            title = resJson['title']
            body = resJson['body']
        except (TypeError, KeyError):
            # API returned something unexpected (e.g. error object) --
            # skip this story rather than crash the worker thread.
            print('json讀取失敗')
        else:
            # Strip characters that are illegal in Windows file names.
            safe_title = re.sub(r'[\\/:\*\?"<>\|]', '', title)
            # Each story has its own file, so no lock is needed for the write.
            with open('./' + path + '/' + paper_id + '-' + safe_title + '.html',
                      'w', encoding='utf-8') as f:
                f.write(body)
    finally:
        # BUGFIX: the original only decremented on the success path; an
        # exception in requests.get/json.loads leaked a thread slot and
        # could stall thread_control()'s throttle forever.
        with glock:
            threads -= 1
    return
 
 
def get_papers(id, timestamp_end):
    """Collect story ids for a Zhihu Daily section, paging back in time.

    id            -- section id as a string ('35' or '2'; see mapping below).
    timestamp_end -- stop paging once the API's timestamp is <= this value.

    Side effects: sets the global output directory ``path`` (creating it if
    missing) and appends every discovered story id to the global
    ``paper_id`` list.  Returns the total number of collected ids.
    """
    global path
    timestamp = 0
    if id == '35':
        path = '小事'
    elif id == '2':
        path = '瞎扯-吐槽'
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() + makedirs() pair.
    os.makedirs(path, exist_ok=True)
    while True:
        if timestamp > timestamp_end:
            # Page backwards from the last timestamp the API gave us.
            url = 'https://news-at.zhihu.com/api/4/section/' + id + '/before/'
            res = requests.get(url + str(timestamp), headers=header)
        elif timestamp == 0:
            # First iteration: fetch the latest page of the section.
            url = 'https://news-at.zhihu.com/api/4/section/' + id
            res = requests.get(url, headers=header)
        else:
            print('已到日期上限')
            break
        # Decode once instead of twice as in the original.
        raw = res.content.decode('utf-8')
        print(raw)  # debug: dump the raw API response
        resJson = json.loads(raw)
        timestamp = resJson['timestamp']
        for story in resJson['stories']:
            paper_id.append(story['id'])
    return len(paper_id)
 
 
def thread_control(N):
    """Download every collected story with at most *N* concurrent threads.

    N -- maximum number of simultaneously running download threads.

    Spawns one worker per id in the global ``paper_id`` list, busy-waiting
    whenever the global ``threads`` counter reaches N, then waits for ALL
    workers to finish before reporting completion.
    """
    workers = []
    for pid in paper_id:
        t = threading.Thread(target=get_single_paper, args=(str(pid), ))
        t.start()
        workers.append(t)
        # Throttle: get_single_paper decrements `threads` when it finishes,
        # freeing a slot.
        while threads >= N:
            time.sleep(0.2)
    # BUGFIX: the original joined only the last-started thread, so earlier
    # workers could still be running when '已完成' printed -- and it raised
    # NameError when paper_id was empty.  Join every worker instead.
    for t in workers:
        t.join()
    print('已完成')
    return
 
 
# Section ids: 35 = 小事 ("Little Things"), 2 = 瞎扯·如何正確的吐槽.
# Timestamp bounds: 1490536800 = data before 2017-03-06,
#                   1553608800 = data before 2019-03-06.
nums = get_papers('2', 1490536800)  # arg 1: section id; arg 2: time lower bound
print('ID採集完成,共' + str(nums) + '個,開始下載')
thread_control(20)  # max number of concurrent download threads
# get_single_paper('9712276')

博客來源(https://www.52pojie.cn/forum.php?mod=viewthread&tid=976709&extra=page%3D1%26filter%3Dtypeid%26typeid%3D29)如有侵權請聯繫我,速刪

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章