How to Scrape Job Listings from Boss直聘 with Python

Environment Setup

  1. Python 3.7
  2. MySQL
  3. requests
  4. PyMySQL
  5. BeautifulSoup
  6. lxml (used as BeautifulSoup's parser below)

Fetching the Data

Analyzing the Request Process

  1. First, open the Boss直聘 site and observe what a normal request looks like.

(screenshot: the Boss直聘 job search page)

  2. Open the trusty F12 developer tools, refresh the page, and inspect the network traffic.

(screenshot: the network panel showing the captured request and its parameters)
As the screenshot above shows, the request parameters include the job keyword, the page number, and so on.
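Incidentally, rather than concatenating the query string by hand, the same parameters can be passed to requests as a dict and URL-encoded automatically. A minimal sketch (headers are omitted here; they are added in the next section, and the example values are mine):

import requests

# parameter names observed in the captured request; values are examples
params = {"query": "java", "page": 1, "ka": "page-1"}
r = requests.get("https://www.zhipin.com/c101120100/", params=params)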

Building the Request

import requests

kw, page = "java", 1  # example values: search keyword and page number
url = "https://www.zhipin.com/c101120100/?query=" + \
    kw + "&page=" + str(page) + "&ka=page-" + str(page)
headers = {
    'Host': 'www.zhipin.com',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
    'Accept-Encoding': 'gzip, deflate, br',
    'Referer': 'https://www.zhipin.com/job_detail/?city=101120100&source=10&query=PHP',
    'DNT': '1',
    'Connection': 'keep-alive',
    'Cookie': '__c=1579400100; __g=-; __l=l=https%3A%2F%2Fwww.zhipin.com%2Fweb%2Fcommon%2Fsecurity-check.html%3Fseed%3DEWjlLZs%252FPr8cqa5Hs%252FGzdK13lMxlscxjvlJZWtdzaQs%253D%26name%3D986ad753%26ts%3D1579400102260%26callbackUrl%3D%252Fjob_detail%252F%253Fcity%253D101120100%2526source%253D10%2526query%253DPHP%26srcReferer%3D&r=&friend_source=0&friend_source=0; __a=83048337.1579400100..1579400100.11.1.11.11; __zp_stoken__=f0d1JSxtXmdA15ixnd1Lh9vbs1Yr2dghco%2FMt7MWfOXsroaplWan5qqBsdTxTRJMadp2RpuuULVCxSdPrFHXeLlCNNw5OdJC3nz6lIaV0p2mXbKx6jgrj3tQ4%2B4zcEDE2SBk',
    'Upgrade-Insecure-Requests': '1',
    'Cache-Control': 'max-age=0',
    'TE': 'Trailers'
}
r = requests.get(url, headers=headers)

Copy all the request headers verbatim into headers. The Cookie value must come from your own browser; the one shown above will expire and stop working on its own.
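Since the cookie goes stale, one way to avoid editing the script each time is to read it from the environment. A small sketch (the variable name BOSS_COOKIE is hypothetical):

import os

# fall back to the hard-coded value if BOSS_COOKIE is not set
headers['Cookie'] = os.environ.get('BOSS_COOKIE', headers['Cookie'])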

Analyzing the Page Structure

Now parse out the fields we need. This step uses BeautifulSoup.
(screenshot: a job card's markup in the element inspector)
Inspection shows that every job card carries the class job-primary:

from bs4 import BeautifulSoup

soup = BeautifulSoup(r.text, "lxml")
all_jobs = soup.select("div.job-primary")
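When the cookie has expired, Boss直聘 answers with its security-check page instead of the listing, and the selector matches nothing. A cheap guard, assuming an empty result means exactly that:

if not all_jobs:
    # most likely redirected to the security-check page; r.url shows where we landed
    raise RuntimeError("No job cards found - cookie probably expired: " + r.url)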

Continuing the analysis:
(screenshot: the fields inside the info-primary block)
At this point we know where every basic field lives; the next step is to assemble the data.

Assembling the Data

infos = []
for job in all_jobs:
    jname = job.find("div", attrs={"class": "job-title"}).text
    jurl = "https://www.zhipin.com" + \
        job.find("div", attrs={"class": "info-primary"}).h3.a.attrs['href']
    jid = job.find(
        "div", attrs={"class": "info-primary"}).h3.a.attrs['data-jid']
    sal = job.find("div", attrs={"class": "info-primary"}).h3.a.span.text
    info_contents = job.find(
        "div", attrs={"class": "info-primary"}).p.contents
    addr = info_contents[0]

    # the <p> holds 3, 5, or 7 nodes depending on which of the
    # address / experience / education fields the listing provides
    if len(info_contents) == 3:
        work_year = "N/A"
        edu = info_contents[2]
    elif len(info_contents) == 5:
        work_year = info_contents[2]
        edu = info_contents[4]
    elif len(info_contents) == 7:
        work_year = info_contents[-3]
        edu = info_contents[-1]
    company = job.find("div", attrs={"class": "company-text"}).h3.a.text
    company_type = job.find(
        "div", attrs={"class": "company-text"}).p.contents[0]
    company_staff = job.find(
        "div", attrs={"class": "company-text"}).p.contents[-1]
    print(jid, jname, jurl, sal, addr, work_year,
          edu, company, company_type, company_staff)
    infos.append({
        "jid": jid,
        "name": jname,
        "sal": sal,
        "addr": addr,
        "work_year": work_year,
        "edu": edu,
        "company": company,
        "company_type": company_type,
        "company_staff": company_staff,
        "url": jurl})
print("%s jobs: page %d fetched" % (kw, page))
# print(infos)
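One fragility worth noting: every .find(...).text chain above raises AttributeError if a tag is missing. A tiny helper (the name safe_text is mine, not the author's) keeps the loop alive when the markup drifts:

def safe_text(tag, default="N/A"):
    """Return the stripped text of a BeautifulSoup tag, or a default when it is None."""
    return tag.get_text(strip=True) if tag is not None else default

# e.g. jname = safe_text(job.find("div", attrs={"class": "job-title"}))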

Complete Code

import requests
from bs4 import BeautifulSoup


def get_one_page_info(kw, page):
    url = "https://www.zhipin.com/c101120100/?query=" + \
        kw+"&page="+str(page)+"&ka=page-"+str(page)
    headers = {
        'Host': 'www.zhipin.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Accept-Encoding': 'gzip, deflate, br',
        'Referer': 'https://www.zhipin.com/job_detail/?city=101120100&source=10&query=PHP',
        'DNT': '1',
        'Connection': 'keep-alive',
        'Cookie': '__c=1579400100; __g=-; __l=l=https%3A%2F%2Fwww.zhipin.com%2Fweb%2Fcommon%2Fsecurity-check.html%3Fseed%3DEWjlLZs%252FPr8cqa5Hs%252FGzdK13lMxlscxjvlJZWtdzaQs%253D%26name%3D986ad753%26ts%3D1579400102260%26callbackUrl%3D%252Fjob_detail%252F%253Fcity%253D101120100%2526source%253D10%2526query%253DPHP%26srcReferer%3D&r=&friend_source=0&friend_source=0; __a=83048337.1579400100..1579400100.11.1.11.11; __zp_stoken__=f0d1JSxtXmdA15ixnd1Lh9vbs1Yr2dghco%2FMt7MWfOXsroaplWan5qqBsdTxTRJMadp2RpuuULVCxSdPrFHXeLlCNNw5OdJC3nz6lIaV0p2mXbKx6jgrj3tQ4%2B4zcEDE2SBk',
        'Upgrade-Insecure-Requests': '1',
        'Cache-Control': 'max-age=0',
        'TE': 'Trailers'
    }
    r = requests.get(url, headers=headers)
    soup = BeautifulSoup(r.text, "lxml")
    # print(soup)
    # first grab the list of job cards on this page
    all_jobs = soup.select("div.job-primary")
    infos = []
    for job in all_jobs:
        jname = job.find("div", attrs={"class": "job-title"}).text
        jurl = "https://www.zhipin.com" + \
            job.find("div", attrs={"class": "info-primary"}).h3.a.attrs['href']
        jid = job.find(
            "div", attrs={"class": "info-primary"}).h3.a.attrs['data-jid']
        sal = job.find("div", attrs={"class": "info-primary"}).h3.a.span.text
        info_contents = job.find(
            "div", attrs={"class": "info-primary"}).p.contents
        addr = info_contents[0]
       
        # the <p> holds 3, 5, or 7 nodes depending on which fields are present
        if len(info_contents) == 3:
            work_year = "N/A"
            edu = info_contents[2]
        elif len(info_contents) == 5:
            work_year = info_contents[2]
            edu = info_contents[4]
        elif len(info_contents) == 7:
            work_year = info_contents[-3]
            edu = info_contents[-1]
        company = job.find("div", attrs={"class": "company-text"}).h3.a.text
        company_type = job.find(
            "div", attrs={"class": "company-text"}).p.contents[0]
        company_staff = job.find(
            "div", attrs={"class": "company-text"}).p.contents[-1]
        print(jid, jname, jurl, sal, addr, work_year,
              edu, company, company_type, company_staff)
        infos.append({
            "jid": jid,
            "name": jname,
            "sal": sal,
            "addr": addr,
            "work_year": work_year,
            "edu": edu,
            "company": company,
            "company_type": company_type,
            "company_staff": company_staff,
            "url": jurl})
    print("%s職位信息,第%d頁抓取完成" % (kw, page))
    # print(infos)
    return infos

Saving the Data

Creating the Table

First, a simple table to hold the results:

DROP TABLE IF EXISTS `boss_job`;
CREATE TABLE `boss_job`  (
  `jid` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
  `name` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
  `sal` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
  `addr` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
  `work_year` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
  `edu` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
  `company` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
  `company_type` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
  `company_staff` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
  `url` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
  PRIMARY KEY (`jid`) USING BTREE
) ENGINE = InnoDB CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;

Saving to the Database

import pymysql


def save_mysql(infos):
    '''Save one page of results to the database.'''
    # charset matches the utf8mb4 table definition above
    db = pymysql.connect(host="localhost", user="root", password="root",
                         database="boss", charset="utf8mb4")
    cursor = db.cursor()
    for job in infos:
        # parameterized query: quotes in company names etc. are escaped safely
        sql = ("INSERT INTO boss_job VALUES "
               "(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
        try:
            cursor.execute(sql, (job["jid"], job["name"], job["sal"],
                                 job["addr"], job["work_year"], job["edu"],
                                 job["company"], job["company_type"],
                                 job["company_staff"], job["url"]))
        except pymysql.Error as e:
            print("database error:", e)
            db.rollback()
        else:
            db.commit()
    db.close()
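Because jid is the primary key, re-running the scraper over the same pages hits duplicate-key errors. A sketch of one workaround (not the original code) using MySQL's ON DUPLICATE KEY UPDATE to refresh existing rows instead:

sql = ("INSERT INTO boss_job VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) "
       "ON DUPLICATE KEY UPDATE sal = VALUES(sal), url = VALUES(url)")
cursor.execute(sql, (job["jid"], job["name"], job["sal"], job["addr"],
                     job["work_year"], job["edu"], job["company"],
                     job["company_type"], job["company_staff"], job["url"]))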

Main Program

for i in range(1, 11):
    infos = get_one_page_info("java", i)
    save_mysql(infos)
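One easy courtesy: pause between pages so the requests do not arrive in a burst, which also lowers the odds of being bounced to the security check. A variant of the loop (the delay range is arbitrary):

import random
import time

for i in range(1, 11):
    infos = get_one_page_info("java", i)
    save_mysql(infos)
    time.sleep(random.uniform(1, 3))  # wait 1-3 seconds between pages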

That completes the scraper. There is still room for improvement, of course: multithreading, automatic cookie acquisition, and so on. The data could also be saved to an Excel file for easier analysis, as sketched below.
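For the Excel route, a minimal sketch with pandas (assuming pandas and openpyxl are installed; the file name is mine):

import pandas as pd

all_infos = []
for i in range(1, 11):
    all_infos.extend(get_one_page_info("java", i))

# one row per job; columns come straight from the dict keys
pd.DataFrame(all_infos).to_excel("boss_job.xlsx", index=False)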
