class PostdemoSpider(scrapy.Spider):
    name = 'postDemo'
    # allowed_domains = ['www.baidu.com']
    start_urls = ['https://fanyi.baidu.com/sug']

    # By default a spider has a start_requests that issues GET requests:
    #
    #     def start_requests(self):
    #         for url in self.start_urls:
    #             yield scrapy.Request(url=url, callback=self.parse)
    #
    # Common ways to send a POST request instead:
    #   - start_requests MUST be overridden;
    #   - either set method='POST' on scrapy.Request,
    #   - or send the POST with scrapy.FormRequest (used below).

    def start_requests(self):
        """Override the default GET start requests with a form POST."""
        print('start_request')
        payload = {'kw': 'dog'}
        for target in self.start_urls:
            yield scrapy.FormRequest(url=target,
                                     formdata=payload,
                                     callback=self.parse)

    def parse(self, response):
        """Handle the POST response (currently a no-op)."""
        # print(response.text)
        pass
cookie
post 請求之後 Scrapy 會自動攜帶並處理 cookie,後續直接請求需要登錄態的相關頁面即可;注意爲每個請求指定正確的 callback。
新建工程時一定要注意 settings 的設置(例如 ROBOTSTXT_OBEY、USER_AGENT)。
import scrapy
class DoubanSpider(scrapy.Spider):
    """Log in to Douban via a POST request, then crawl a profile page.

    Scrapy's cookie middleware automatically carries the session cookies
    from the login POST over to every follow-up request, so once ``parse``
    runs we can fetch cookie-protected pages directly — just make sure
    each request uses the right ``callback``.
    """
    name = 'douban'
    # allowed_domains = ['www.douban.com']
    start_urls = ['https://accounts.douban.com/j/mobile/login/basic']

    def start_requests(self):
        """Send the login form as a POST instead of the default GET."""
        print('start')
        # Login payload is loop-invariant: build it once, not per URL.
        # Credentials are intentionally blank in this demo — fill in
        # real values (and the anti-CSRF 'ck'/'ticket') before use.
        data = {
            'ck': '',
            'name': '',
            'password': '',
            'remember': 'false',
            'ticket': ''
        }
        for url in self.start_urls:
            yield scrapy.FormRequest(url=url, formdata=data,
                                     callback=self.parse)

    def parse(self, response):
        """Runs after the login POST; session cookies are now set."""
        print('登陸')
        url = 'https://www.douban.com/people/193627830/'
        # The cookie middleware reattaches the login cookies here.
        yield scrapy.Request(url=url, callback=self.parseBySecond)

    def parseBySecond(self, response):
        """Persist the cookie-protected profile page to disk."""
        print('寫入')
        with open('./test.html', 'w', encoding='utf-8') as f:
            f.write(response.text)
代理
就是在下載中間件裏自定義一個類,然後實現 process_request 方法:
request.meta['proxy'] = 'https://代理ip'
在 settings 裏啓用該中間件後,每次請求的代理都會被更改
class DoubanSpider(scrapy.Spider):
    """Fetch Baidu's IP-lookup result page and dump it to disk.

    Useful for verifying that a proxy configured in the downloader
    middleware is actually being applied to outgoing requests.
    """
    name = 'douban'
    # allowed_domains = ['www.douban.com']
    start_urls = ['https://www.baidu.com/s?wd=ip']

    def parse(self, response):
        # Persist the page so the effective client IP can be inspected.
        with open('baidu.html', 'w', encoding='utf-8') as page:
            page.write(response.text)