# This script uses the Selenium plugin with the Firefox browser and stores the scraped data in a CSV file.
# Setup details are not covered here; install any missing prerequisites yourself. Slightly improved over the previous version: multiple search keywords are now supported.
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 11 20:21:04 2019
@author: Administrator
"""
from selenium import webdriver
import time
import random
import csv
import codecs
# Search keywords to scrape; format: li_list = ["keyword1", "keyword2", ...]
li_list = ["洗手液","襪子","衛生紙"]
# Target URL: the Suning product-search results page for a specific product query.
# NOTE: searching a generic category term yields a different page layout.
wangzhi = "https://search.suning.com/"
# Number of result pages to crawl per keyword (default: 50).
yema = 50
# Running counter of scraped shops — used only for progress printing.
num = 1
# Open the CSV output in append mode.
# BUG FIX: the csv module requires newline='' to avoid extra blank rows on
# Windows; encoding='utf-8' keeps Chinese text from being garbled
# (replaces the previous codecs.open call, which does not support newline).
f = open('123suning.csv', 'a', encoding='utf-8', newline='')
csv_writer = csv.writer(f)
# Chrome driver (unused alternative):
# browser = webdriver.Chrome(executable_path="chromedriver")
# Firefox driver shared by the whole script.
browser1 = webdriver.Firefox(executable_path="geckodriver")
def browser_1(url, browser=browser1):
    """Navigate the shared Firefox driver to *url* (search domain) and return it."""
    browser.get(url)
    # To scrape as a logged-in user, add cookies for the search domain here:
    # cookie_list = [{...cookies captured after login; note the domain differs
    #                 from the product-page cookies used in browser_2...}]
    # for item in cookie_list:
    #     browser.add_cookie({
    #         'domain': 'search.suning.com',
    #         'name': item['name'],
    #         'value': item['value'],
    #         'path': '/',
    #         'expires': None,
    #     })
    return browser
def browser_2(url, browser=browser1):
    """Navigate the shared Firefox driver to *url* (product domain) and return it."""
    browser.get(url)
    # To scrape as a logged-in user, add cookies for the product domain here:
    # cookie_list = [{...cookies captured after login; note the domain differs
    #                 from the search-page cookies used in browser_1...}]
    # for item in cookie_list:
    #     browser.add_cookie({
    #         'domain': 'product.suning.com',
    #         'name': item['name'],
    #         'value': item['value'],
    #         'path': '/',
    #         'expires': None,
    #     })
    return browser
# Main scrape: for each keyword, search Suning, collect unique product URLs
# across result pages, then visit each product page and write the seller's
# company name / phone / address to the CSV.
for li in li_list:
    print(li)
    browser = browser_1(wangzhi)
    # Type the keyword into the search box and submit.
    input_txt = browser.find_element_by_id("sTxt")
    input_txt.send_keys(li)
    browser.find_element_by_id("sBtn").click()
    time.sleep(random.randint(2, 5))
    # Dismiss the pop-up ad if one appears; absence just raises and is ignored.
    # BUG FIX: narrowed bare `except:` so Ctrl-C / SystemExit still propagate.
    try:
        browser.find_element_by_class_name("close-btn").click()
    except Exception:
        print(li)
    # Collected product URLs and the shop ids already seen (for de-duplication).
    urls = []
    shopid_list = []
    nub = 1
    for i in range(yema):
        print(i)
        # Scroll to the bottom so lazily-loaded result items render.
        browser.execute_script("var q=document.documentElement.scrollTop=100000")
        time.sleep(random.randint(3, 5))
        shops = browser.find_elements_by_class_name("sellPoint")
        for shop in shops:
            url = shop.get_attribute('href')
            vip = "/0000000000/"
            # BUG FIX: get_attribute can return None — guard before the `in` test.
            if url and "https://product.suning.com/" in url:
                # Shop-id segment of the product URL — presumably chars 27..36;
                # TODO confirm against a live URL.
                shopid = url[27:37]
                # Skip Suning's own ("VIP") listings and shops already recorded.
                # BUG FIX: use logical `and` (short-circuits) instead of bitwise `&`.
                if vip not in url and shopid not in shopid_list:
                    shopid_list.append(shopid)
                    urls.append(url)
                    print(i, "--", nub, "--", url)
                    print(shopid)
                    nub += 1
        print(i, "頁")
        # (Removed dead `i += 1`: the for-loop reassigns i every iteration.)
        # Scroll back up so the pagination control is reachable.
        browser.execute_script("var q=document.documentElement.scrollTop=500")
        time.sleep(random.randint(2, 3))
        # Stop after the last requested page instead of clicking "next".
        if i == yema - 1:
            break
        next_page = browser.find_element_by_class_name("next")
        time.sleep(random.randint(2, 3))
        next_page.click()
        time.sleep(random.randint(2, 5))
    print("---" * 10)
    # Visit each collected product page and scrape the seller's contact block.
    for ul in urls:
        browser_shop = browser_2(ul)
        time.sleep(2)
        # Any missing element (layout change, error page) lands in the except.
        try:
            # Company name / phone / address elements in the contact header.
            chead_companyName = browser_shop.find_element_by_id("chead_companyName")
            chead_telPhone = browser_shop.find_element_by_id("chead_telPhone")
            chead_companyAddress = browser_shop.find_element_by_id("chead_companyAddress")
            # Expand the contact panel so the .text values are populated.
            browser_shop.find_element_by_id("chead_road").click()
            # Substitute "null" for empty fields so CSV columns stay aligned.
            companyName = chead_companyName.text or "null"
            telPhone = chead_telPhone.text or "null"
            companyAddress = chead_companyAddress.text or "null"
            num += 1
            print(companyName, "==", telPhone, "==", companyAddress, "/", li, "/", num)
            csv_writer.writerow([companyName, telPhone, companyAddress])
        except Exception:
            print("錯誤頁面")
f.close()
print("結束")
# For learning and exchange purposes only!