

Example: scraping Tianya forum post content with multithreaded Python

2019-11-25 18:27:50

This script uses re, urllib, and threading to download the pages of a Tianya forum post in parallel. Set url to the first page of the target post and file_name to the name of the output file. Note that the code is written for Python 2 (print statements, urllib.urlopen).
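Tianya paginates a post as post-&lt;board&gt;-&lt;id&gt;-&lt;page&gt;.shtml, so every page URL can be derived from the first page's URL by swapping the trailing page number; this is exactly what the url[:-7] slice in main() below does. A quick illustration, using the sample thread URL from the script:

first_page = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
# drop the trailing '1.shtml' (7 characters), then append the wanted page number
page_three = '%s%s.shtml' % (first_page[:-7], 3)
# -> 'http://bbs.tianya.cn/post-16-996521-3.shtml'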


down_tianya.py

The code is as follows:

#coding:utf-8

import urllib
import re
import threading
import os

class Down_Tianya(threading.Thread):
    """Download one page of the post in its own thread."""
    def __init__(self, url, num, dt):
        threading.Thread.__init__(self)
        self.url = url
        self.num = num
        self.txt_dict = dt

    def run(self):
        print 'downloading from %s' % self.url
        self.down_text()

    def down_text(self):
        """Scrape the content of the page at self.url and store it in the
        shared dict, keyed by page number."""
        html_content = urllib.urlopen(self.url).read()
        text_pattern = re.compile(r'<div class="atl-item".*?<span>时间:(.*?)</span>.*?<!-- <div class="host-ico">楼主</div> -->.*?<div class="bbs-content.*?>\s*(.*?)</div>', re.DOTALL)
        text = text_pattern.findall(html_content)
        text_join = ['\r\n\r\n\r\n\r\n'.join(item) for item in text]
        self.txt_dict[self.num] = text_join


def page(url):
    """Extract the total page count from the first-page HTML."""
    html_page = urllib.urlopen(url).read()
    page_pattern = re.compile(r'<a href="\S*?">(\d*)</a>\s*<a href="\S*?" class="\S*?">下页</a>')
    page_result = page_pattern.search(html_page)
    if page_result:
        page_num = int(page_result.group(1))
        return page_num


def write_text(page_dict, fn):
    """Write the dict contents to a text file in page order; each value is
    the list of blocks scraped from one page."""
    tx_file = open(fn, 'w+')
    pn = len(page_dict)
    for i in range(1, pn+1):
        tx_list = page_dict[i]
        for tx in tx_list:
            tx = tx.replace('<br>', '\r\n').replace('<br />', '\r\n').replace('&nbsp;', '')
            tx_file.write(tx.strip() + '\r\n'*4)
    tx_file.close()


def main():
    url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
    file_name = 'abc.txt'
    my_page = page(url)
    my_dict = {}

    print 'page num is : %s' % my_page

    threads = []

    # Build one URL per page and download them all in parallel.
    for num in range(1, my_page+1):
        myurl = '%s%s.shtml' % (url[:-7], num)
        downlist = Down_Tianya(myurl, num, my_dict)
        downlist.start()
        threads.append(downlist)

    # Wait until every download has finished before writing the file.
    for t in threads:
        t.join()

    write_text(my_dict, file_name)

    print 'All downloads finished. Saved file in directory: %s' % os.getcwd()

if __name__ == '__main__':
    main()
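For readers on Python 3, here is a minimal sketch of the same multithreaded approach. It assumes the same 2014-era Tianya markup and a UTF-8 response encoding, neither of which is guaranteed against the live site, and it hardcodes a hypothetical page count where the page() function above would normally supply one:

#coding:utf-8
# Python 3 sketch of the same technique; the markup assumptions are copied
# from down_tianya.py above and may no longer match the live site.
import re
import threading
import urllib.request

TEXT_PATTERN = re.compile(
    r'<div class="atl-item".*?<span>时间:(.*?)</span>.*?'
    r'<!-- <div class="host-ico">楼主</div> -->.*?'
    r'<div class="bbs-content.*?>\s*(.*?)</div>', re.DOTALL)

def down_text(url, num, txt_dict):
    """Fetch one page and store its (timestamp, body) blocks under its page number."""
    raw = urllib.request.urlopen(url).read()
    html = raw.decode('utf-8', errors='ignore')  # assumes a UTF-8 page encoding
    txt_dict[num] = [('\r\n' * 4).join(item) for item in TEXT_PATTERN.findall(html)]

def main():
    url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
    page_count = 5  # hypothetical; use page() from the script above to compute it
    page_dict = {}
    threads = []
    for num in range(1, page_count + 1):
        page_url = '%s%s.shtml' % (url[:-7], num)
        t = threading.Thread(target=down_text, args=(page_url, num, page_dict))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()  # wait for every page before writing
    # newline='' keeps the explicit \r\n separators from being translated again
    with open('abc.txt', 'w', encoding='utf-8', newline='') as f:
        for num in sorted(page_dict):
            for tx in page_dict[num]:
                tx = tx.replace('<br>', '\r\n').replace('<br />', '\r\n').replace('&nbsp;', '')
                f.write(tx.strip() + '\r\n' * 4)

if __name__ == '__main__':
    main()

The shared-dict-plus-join() pattern is the same as in the Python 2 version: each thread writes under its own key, so no lock is needed, and the final sorted() pass restores page order regardless of which download finishes first.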
