Day-62 A 'Simple' Crawler

from urllib.error import URLError
from urllib.request import urlopen

import re
import pymysql


def get_page_code(start_url, *, retry_times=3, charsets=('utf-8', )):
    """Fetch a page and return its decoded HTML, or None on failure."""
    try:
        # Download the raw bytes once, then try each candidate charset in turn.
        content = urlopen(start_url).read()
        html = None
        for charset in charsets:
            try:
                html = content.decode(charset)
                break
            except UnicodeDecodeError:
                pass
    except URLError as ex:
        print('Error:', ex)
        # On a network error, retry a few more times before giving up.
        return get_page_code(start_url, retry_times=retry_times - 1, charsets=charsets) if \
            retry_times > 0 else None
    return html


def main():
    url_list = ['http://sports.sohu.com/nba_a.shtml', 'http://quote.eastmoney.com/centerv2/hsbk']
    visited_list = set()
    while len(url_list) > 0:
        current_url = url_list.pop(0)
        visited_list.add(current_url)
        print(current_url)
        html = get_page_code(current_url, charsets=('utf-8', 'gbk', 'gb2312'))
        if html:
            # Collect every hyperlink on the page; keep only absolute http(s)
            # links, since urlopen() cannot open relative URLs.
            link_regex = re.compile(r'<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
            link_list = [link for link in re.findall(link_regex, html)
                         if link.startswith('http')]
            url_list += link_list
            # Open a MySQL connection for the links found on this page.
            conn = pymysql.connect(host='localhost', port=3306,
                                   db='bots', user='root',
                                   passwd='123456', charset='utf8')
            try:
                for link in link_list:
                    if link not in visited_list:
                        visited_list.add(link)
                        print(link)
                        html = get_page_code(link, charsets=('utf-8', 'gbk', 'gb2312'))
                        if html:
                            # The target pages put the article title in an <h1> followed by a <span>.
                            title_regex = re.compile(r'<h1>(.*?)<span', re.IGNORECASE)
                            match_list = title_regex.findall(html)
                            if len(match_list) > 0:
                                title = match_list[0]
                                with conn.cursor() as cursor:
                                    cursor.execute(
                                        'insert into tb_result (rtitle, rurl) values (%s, %s)',
                                        (title, link))
                                conn.commit()
            finally:
                conn.close()
    print('Done!')


if __name__ == '__main__':
    main()

I can't even copy this sprawling pile of code without making mistakes, and I still don't know where the errors are. In short, by importing third-party packages and using regular expressions, you can scrape the content you want from a page's nodes, such as hyperlinks or image tags, and then persist it into a database. I sort of follow the logic, but honestly I'm dizzy.
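
The insert statement above assumes a tb_result table already exists in the bots database. A minimal sketch of what that table might look like, with the column names taken from the insert statement and the types guessed by me:

import pymysql

# Hypothetical schema matching the insert in main(); the column types are a guess.
conn = pymysql.connect(host='localhost', port=3306, db='bots',
                       user='root', passwd='123456', charset='utf8')
try:
    with conn.cursor() as cursor:
        cursor.execute('''
            create table if not exists tb_result (
                rid int auto_increment primary key,
                rtitle varchar(200) not null,
                rurl varchar(1024) not null
            ) default charset=utf8
        ''')
    conn.commit()
finally:
    conn.close()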

Of course there is a simpler, if slower, way to do this, such as bs4 (Beautiful Soup).
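
A rough sketch of the same idea with bs4 (assuming beautifulsoup4 is installed and reusing the get_page_code helper above; parse_with_bs4 is just a name I made up):

from bs4 import BeautifulSoup

def parse_with_bs4(url):
    """Return (title, links) for a page, or (None, []) if it cannot be fetched."""
    html = get_page_code(url, charsets=('utf-8', 'gbk', 'gb2312'))
    if not html:
        return None, []
    soup = BeautifulSoup(html, 'html.parser')
    # Every <a> tag with an href attribute -- no hand-written regex needed.
    links = [a['href'] for a in soup.find_all('a', href=True)
             if a['href'].startswith('http')]
    # Text of the first <h1>, roughly what the title regex above tries to capture.
    h1 = soup.find('h1')
    title = h1.get_text(strip=True) if h1 else None
    return title, links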


I'm doomed. It looks like I need to drop back a level. From web knowledge to Python basics, I'm lacking far too much! Lately I can't even write my own blog posts, and relying entirely on copying is going too far.
