urllib是python中用於獲取url(Uniform Resource Locators,統一資源定位器)的一個模塊,我們可以利用它來抓取遠程的數據並進行保存。下面整理了urllib使用中關於header、代理、超時、認證、異常處理等方法,一起來看看。
python3 抓取網頁資源的 N 種方法
1、最簡單
# Fetch a page in the most direct way: open the URL and read the body.
import urllib.request

resp = urllib.request.urlopen('http://python.org/')
html = resp.read()
2、使用 Request
# Build the URL into a Request object first, then pass it to urlopen.
# Request exists so that extra information (headers, body data) can be
# attached to the request before sending; urlopen alone cannot carry it.
# (The original text had the comment fused into the import statement,
# which made the snippet a syntax error.)
import urllib.request

req = urllib.request.Request('http://python.org/')
response = urllib.request.urlopen(req)
the_page = response.read()
# print(response.read().decode('utf-8'))
3、發送數據
#! /usr/bin/env python3
"""POST form data to a login page with a Referer header."""
import urllib.parse
import urllib.request

url = 'http://localhost/login.php'
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
values = {
    'act': 'login',
    'login[email]': '[email protected]',
    'login[password]': '123456',
}
# urlencode() returns str; in Python 3 the Request data argument must be
# bytes, so encode explicitly (the original passed str, which raises a
# TypeError when the request is sent).
data = urllib.parse.urlencode(values).encode('utf-8')
req = urllib.request.Request(url, data)
req.add_header('Referer', 'http://www.python.org/')
response = urllib.request.urlopen(req)
the_page = response.read()
print(the_page.decode("utf8"))
#這裏就用到urllib.parse,通過bytes(urllib.parse.urlencode())可以將post數據進行轉換放到urllib.request.urlopen的data參數中。這樣就完成了一次post請求。
所以如果我們添加data參數的時候就是以post請求方式請求,如果沒有data參數就是get請求方式
4、發送數據和header
#! /usr/bin/env python3
"""POST form data together with custom headers (User-Agent)."""
import urllib.parse
import urllib.request

url = 'http://localhost/login.php'
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
values = {
    'act': 'login',
    'login[email]': '[email protected]',
    'login[password]': '123456',
}
headers = {'User-Agent': user_agent}
# urlencode() returns str; Request's data argument must be bytes in
# Python 3, so encode it (the original passed str -> TypeError).
data = urllib.parse.urlencode(values).encode('utf-8')
req = urllib.request.Request(url, data, headers)
response = urllib.request.urlopen(req)
the_page = response.read()
print(the_page.decode("utf8"))
5、http 錯誤
#! /usr/bin/env python3
"""Catch an HTTP error status and print its code and body."""
import urllib.error  # HTTPError lives in urllib.error (missing in the original)
import urllib.request

# Stray trailing space removed from the URL literal.
req = urllib.request.Request('http://www.111cn.net')
try:
    urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
    # HTTPError is also a file-like response: it has a status code
    # and a readable body.
    print(e.code)
    print(e.read().decode("utf8"))
6、異常處理1
#! /usr/bin/env python3
"""Distinguish HTTP errors from connectivity errors.

HTTPError must be caught before URLError because it is a subclass.
"""
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError

# Stray trailing space removed from the URL literal.
req = Request("http://www.111cn.net/")
try:
    response = urlopen(req)
except HTTPError as e:
    # Double quotes: the apostrophe in "couldn't" broke the original
    # single-quoted literal (SyntaxError).
    print("The server couldn't fulfill the request.")
    print('Error code: ', e.code)
except URLError as e:
    print('We failed to reach a server.')
    print('Reason: ', e.reason)
else:
    print("good!")
    print(response.read().decode("utf8"))
7、異常處理2
#! /usr/bin/env python3
"""Handle all failures through URLError, inspecting its attributes.

NOTE(review): HTTPError (a URLError subclass) has BOTH .reason and
.code, so with this attribute order an HTTP error status is reported
as a connectivity failure — kept as in the original tutorial, but the
two-except form above is the clearer pattern.
"""
from urllib.request import Request, urlopen
from urllib.error import URLError

# Stray trailing space removed from the URL literal.
req = Request("http://www.111cn.net/")
try:
    response = urlopen(req)
except URLError as e:
    if hasattr(e, 'reason'):
        print('We failed to reach a server.')
        print('Reason: ', e.reason)
    elif hasattr(e, 'code'):
        # Double quotes: the apostrophe in "couldn't" broke the
        # original single-quoted literal (SyntaxError).
        print("The server couldn't fulfill the request.")
        print('Error code: ', e.code)
else:
    print("good!")
    print(response.read().decode("utf8"))
8、HTTP 認證
#! /usr/bin/env python3
"""HTTP Basic authentication via a password manager and custom opener."""
import urllib.request

# create a password manager
password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
# Add the username and password.
# If we knew the realm, we could use it instead of None.
# (Stray trailing spaces removed from the URL literals — they made
# the URLs malformed.)
top_level_url = "https://www.111cn.net/"
password_mgr.add_password(None, top_level_url, 'rekfan', 'xxxxxx')
handler = urllib.request.HTTPBasicAuthHandler(password_mgr)
# create an "opener" (OpenerDirector instance)
opener = urllib.request.build_opener(handler)
# use the opener directly to fetch a URL
a_url = "https://www.111cn.net/"
x = opener.open(a_url)
print(x.read())
# Install the opener: from now on every urllib.request.urlopen call
# uses it (and therefore the stored credentials).
urllib.request.install_opener(opener)
a = urllib.request.urlopen(a_url).read().decode('utf8')
print(a)
9、使用代理
#! /usr/bin/env python3
"""Route requests through a proxy via ProxyHandler."""
import urllib.request

# ProxyHandler keys are URL schemes ('http', 'https'); the original
# key 'sock5' matches no scheme, so the proxy was silently never used.
# NOTE(review): the standard library does not support SOCKS proxies —
# a third-party package (e.g. PySocks) is needed for real SOCKS5.
proxy_support = urllib.request.ProxyHandler({'http': 'http://localhost:1080'})
opener = urllib.request.build_opener(proxy_support)
urllib.request.install_opener(opener)
# Stray trailing space removed from the URL literal.
a = urllib.request.urlopen("http://www.111cn.net").read().decode("utf8")
print(a)
10、超時
#! /usr/bin/env python3
"""Apply a global socket timeout to urllib requests."""
import socket
import urllib.request

# timeout in seconds
timeout = 2
# setdefaulttimeout affects every new socket, including the ones
# urllib.request.urlopen creates internally.
socket.setdefaulttimeout(timeout)

# Stray trailing space removed from the URL literal.
req = urllib.request.Request('http://www.111cn.net/')
a = urllib.request.urlopen(req).read()
print(a)