# Simple crawler: fetch joke list pages from xiaohua.zol.com.cn and print each joke's full text.
from urllib import request, error
import re

# Spoof a desktop-browser User-Agent so the site does not reject the requests.
headers = ('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:68.0) Gecko/20100101 Firefox/68.0')
opener = request.build_opener()
opener.addheaders = [headers]
request.install_opener(opener)

# Compile the patterns once, outside the loops, instead of per iteration.
link_pat = re.compile('target="_blank" href="(.*?)" class="all-read"')
# re.S lets '.' match newlines so multi-line joke bodies are captured.
text_pat = re.compile('<p></p>(.*?)<p></p>', re.S)

for i in range(1, 2):
    url = 'http://xiaohua.zol.com.cn/new/' + str(i) + '.html'
    file = './data/' + str(i) + '.html'  # NOTE(review): assigned but never used — was a save-to-disk step intended?
    try:
        data = request.urlopen(url).read().decode('utf-8', 'ignore')
        ret = link_pat.findall(data)
        # Fetch the full text of every joke linked from the list page.
        for j in range(0, len(ret)):
            all_url = 'http://xiaohua.zol.com.cn' + ret[j]
            # NOTE(review): list page is decoded as utf-8 but detail pages as gbk —
            # confirm the site's actual encodings; 'ignore' masks any mismatch.
            all_data = request.urlopen(all_url).read().decode('gbk', 'ignore')
            all_ret = text_pat.findall(all_data)
            print(all_ret)
    except error.URLError as e:
        # Report HTTP status / reason when available instead of raising.
        if hasattr(e, 'code'):
            print(e.code)
        if hasattr(e, 'reason'):
            print(e.reason)
    except Exception as e:
        # Best-effort script: print any other failure and keep going.
        print(e)
# Multithreaded crawler section
# Threading basics: define two demo threads, A and B.
import threading


class A(threading.Thread):
    """Demo thread that prints its tag 100 times."""

    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        for i in range(0, 100):
            print('我是A菜')
class B(threading.Thread):
    """Demo thread that prints its tag 10 times."""

    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        for i in range(0, 10):
            print('我是B菜')
# Start the threads: both run concurrently, so their prints interleave nondeterministically.
t1 = A()
t1.start()
t2 = B()
t2.start()
# Multithreaded crawler: odd-numbered and even-numbered list pages are fetched by two threads.
from urllib import request, error
import re
import threading

# Spoof a desktop-browser User-Agent so the site does not reject the requests.
headers = ('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:68.0) Gecko/20100101 Firefox/68.0')
opener = request.build_opener()
opener.addheaders = [headers]
# Install globally so every urlopen() call below sends the spoofed header.
request.install_opener(opener)
class One(threading.Thread):
    """Worker thread that crawls the odd-numbered list pages (1, 3, ..., 29)."""

    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        # Odd pages only; Two handles the even ones.
        for i in range(1, 30, 2):
            get_data(i)
class Two(threading.Thread):
    """Worker thread that crawls the even-numbered list pages (2, 4, ..., 28)."""

    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        # Even pages only; One handles the odd ones.
        for i in range(2, 30, 2):
            get_data(i)
def get_data(i):
    """Fetch list page *i* of the joke site and print each linked joke's full text.

    Network/HTTP failures are printed to stdout rather than raised, so a worker
    thread keeps running after a bad page.
    """
    url = 'http://xiaohua.zol.com.cn/new/' + str(i) + '.html'
    # Compile both patterns once, before the per-link loop.
    link_pat = re.compile('target="_blank" href="(.*?)" class="all-read"')
    # re.S lets '.' match newlines so multi-line joke bodies are captured.
    text_pat = re.compile('<p></p>(.*?)<p></p>', re.S)
    try:
        data = request.urlopen(url).read().decode('utf-8', 'ignore')
        ret = link_pat.findall(data)
        # Fetch the full text of every joke linked from the list page.
        for j in range(0, len(ret)):
            all_url = 'http://xiaohua.zol.com.cn' + ret[j]
            # NOTE(review): list page is decoded as utf-8 but detail pages as gbk —
            # confirm the site's actual encodings; 'ignore' masks any mismatch.
            all_data = request.urlopen(all_url).read().decode('gbk', 'ignore')
            all_ret = text_pat.findall(all_data)
            print(all_ret)
    except error.URLError as e:
        # Report HTTP status / reason when available instead of raising.
        if hasattr(e, 'code'):
            print(e.code)
        if hasattr(e, 'reason'):
            print(e.reason)
    except Exception as e:
        # Best-effort: print any other failure and let the thread continue.
        print(e)
# Launch both crawler threads so the odd and even pages download concurrently.
a = One()
b = Two()
a.start()
b.start()