# Straight to the code — the crawler's approach is explained in the comments.
import requests
from lxml import etree
import time
import re
'''
1.需求分析
title gsmc gz addr jy xl fuli
入口地址:https://www.zhaopin.com/
2.源碼實現
所有職位分類標籤://div[@class='zp-jobNavigater-pop-list']/a
職位詳細列表:https://sou.zhaopin.com/?jl=489&kw=Java%E5%BC%80%E5%8F%91&kt=3
3.代碼實現
'''
# 1. Fetch the job-category tags from the homepage
def get_job_tag(url):
    """Fetch ``url`` (the zhaopin.com homepage) and return the list of
    job-category tag texts found in the navigation pop-up.

    :param url: homepage URL to request.
    :return: list of tag strings (may be empty if the page layout changed).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
    }
    # Bug fix: request the `url` parameter, not the global `starturl` —
    # the original only worked because __main__ happened to define that global.
    response = requests.get(url, headers=headers).text
    # Parse the raw HTML so we can query it with XPath.
    html = etree.HTML(response)
    # All category tags live under the nav pop-up list.
    job_tag = html.xpath("//div[@class='zp-jobNavigater-pop-list']/a/text()")
    return job_tag
# Fetch one page of job postings and persist each unique one
def get_job_info(url, start, kw):
    """Request one page of the job-search JSON API and save every unique posting.

    :param url: API URL template with ``{0}`` = start offset, ``{1}`` = keyword.
    :param start: pagination offset passed into the template.
    :param kw: search keyword passed into the template.
    :return: total number of results reported by the API (``data.numFound``).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
    }
    # Bug fix: format the `url` parameter instead of the global `infourl`.
    info_html = requests.get(url.format(start, kw), headers=headers).json()
    for i in info_html['data']['results']:
        # Build a fresh dict per posting — the original reused one dict,
        # so every saved row silently shared (and overwrote) the same object.
        job_dict = {
            'city': i['city']['items'][0]['name'],
            'company_name': i['company']['name'],
            'company_size': i['company']['size']['name'],
            'companyType': i['company']['type']['name'],
            'eduLevel': i['eduLevel']['name'],
            'emplType': i['emplType'],
            'jobname': i['jobName'],
            'jobType': i['jobType']['display'],
            'salary': i['salary'],
            'welfare': i['welfare'],
            'updateDate': i['updateDate'],
            'workingExp': i['workingExp']['name'],
        }
        # De-duplicate, clean, then persist.
        if unique_data(job_dict):
            save_data(clear_data(job_dict))
    return info_html['data']['numFound']
# De-duplication state: company names and job names already saved (module-level).
companyList = []
jobNameList = []
def unique_data(data):
    """Filter out duplicate postings.

    A posting is a duplicate only when BOTH its job name and its company
    name have been seen before. New postings are recorded in the
    module-level lists as a side effect.

    :param data: dict with at least ``jobname`` and ``company_name`` keys.
    :return: ``data`` (truthy) if the posting is new, ``False`` otherwise.
    """
    # Idiom fix: use boolean `and`, not bitwise `&` — the original only
    # worked because both `in` tests happened to yield bools.
    if data['jobname'] in jobNameList and data['company_name'] in companyList:
        return False
    companyList.append(data['company_name'])
    jobNameList.append(data['jobname'])
    return data
# Data cleaning
def clear_data(data):
    """Normalize a posting dict in place and return it.

    - joins the ``welfare`` list into a single ``'/'``-separated string;
    - strips CJK characters (e.g. the trailing 人 "people") from ``company_size``.

    :param data: dict with ``welfare`` (iterable) and ``company_size`` (str).
    :return: the same dict, mutated.
    """
    data['welfare'] = '/'.join(str(item) for item in data['welfare'])
    # Raw string so the \uXXXX range is not subject to accidental escape edits;
    # matches runs of CJK Unified Ideographs.
    cjk = re.compile(r'[\u4E00-\u9FA5]+')
    data['company_size'] = cjk.sub('', data['company_size'])
    return data
# Persist one posting
def save_data(data):
    """Serialize the posting's values as one '::'-joined line, echo it,
    and append it to ``zlzp.txt`` (UTF-8)."""
    line = '::'.join(str(value) for value in data.values())
    print(line)
    with open('zlzp.txt', 'a+', encoding='utf-8') as out:
        out.write(line + '\n')
# Entry point
if __name__ == '__main__':
    # 1. Request the homepage and collect the job-category tags.
    starturl = 'https://www.zhaopin.com/'
    job_tag_list = get_job_tag(starturl)
    # 2. Page through the job-search API for the first tag, 60 results per page.
    # Bug fix: the original template was missing the '&' between `start={0}`
    # and `pageSize=60`, so the start offset was mangled on every request.
    # The template is loop-invariant, so it is built once, outside the loop.
    infourl = ('https://fe-api.zhaopin.com/c/i/sou?start={0}&pageSize=60&cityId=489'
               '&workExperience=-1&education=-1&companyType=-1&employmentType=-1'
               '&jobWelfareTag=-1&kw={1}&kt=3')
    start = 0
    page = 1
    while True:
        numFound = get_job_info(infourl, start, job_tag_list[0])
        print('第{0}頁'.format(page))
        if start < numFound:
            start += 60
            page += 1
            time.sleep(0.5)  # throttle requests to be polite to the server
        else:
            break