
```python
# -*- coding=utf-8 -*-
import urllib as url_lib      # imported in the original but not used below
import urllib2 as url_lib2
import bs4 as BS4             # imported in the original but not used below
import re
import os
import sys

reload(sys)
sys.setdefaultencoding('utf8')


class get_joke(object):
    def __init__(self, url_str):
        self.url_str = url_str  # starting URL of the qiushibaike joke pages
        self.page_href = set()  # links to the other qiushibaike pages
        self.next_page = ""     # link to the next page
        # qiushibaike blocks plain crawlers, so the request masquerades as a browser
        self.user_agent = ('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
                           '(KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36')
        self.headers = {"User-Agent": self.user_agent}

    # fetch the raw page content
    def getwebpage(self, url_str=""):
        if "" == self.url_str:
            if "" == url_str:
                return None
            self.url_str = url_str
        try:
            request = url_lib2.Request(self.url_str, headers=self.headers)
            webpage = url_lib2.urlopen(request).read().decode('utf-8')
        except url_lib2.URLError as ex:
            if hasattr(ex, "code"):
                print(ex.code)
            if hasattr(ex, "reason"):
                print(ex.reason)
            return None
        return webpage

    # extract the text jokes: author, content, vote count and comment count
    def getjoke_text(self, webpage):
        if webpage is None:
            return None
        re_str = ('<div.*?>.*?<div class="author.*?>.*?<img.*?alt="(.*?)"/>'
                  '.*?<div class="content">.*?<span>(.*?)</span>'
                  '.*?<div class="stats">.*?<i class="number">(.*?)</i>'
                  '.*?<span class="dash">.*?<i class="number">(.*?)</i>.*?</div>')
        re_obj = re.compile(re_str, re.S)  # re.S: let . also match newlines
        jokes = re.findall(re_obj, webpage)
        print("number of jokes found: %d" % len(jokes))
        joke_list = []
        for joke in jokes:
            temp_str = re.sub("(<br/>)", " ", joke[1])
            print("author: %s" % joke[0] + "\ncontent: %s" % temp_str +
                  "\nvotes: %s" % joke[2] + "\ncomments: %s\n" % joke[3])
            joke_list.append([joke[0], joke[1], joke[2], joke[3]])
        return joke_list

    # find the "next page" link on the current page
    def getnext_page(self, webpage):
        if webpage is None:
            return None
        print('looking for the next-page link...')
        re_str = ('<li>.*?<a href="(.*?/page/.*?)".*?>.*?<!--<.*?>-->'
                  '.*?<span class="next">.*?</span>.*?</a>.*?</li>')
        re_obj = re.compile(re_str, re.S)
        next_href = re.findall(re_obj, webpage)
        print("number of next-page links found: %d" % len(next_href))
        if len(next_href) > 0:
            nexthref = self.get_absoluteurl(next_href[0][6:])
            print("next-page link: %s" % nexthref)
            return nexthref

    # turn a (possibly relative) URL into an absolute one
    def get_absoluteurl(self, url_str):
        if url_str.startswith("http://www."):  # must be checked before the plain http:// case
            return "http://" + url_str[11:]
        if url_str.startswith("http://"):
            return url_str
        if url_str.startswith("www."):
            return "http://" + url_str[4:]
        # relative link: join it with the base URL
        return self.url_str + "/" + url_str

    # save the jokes to jokes/joke.txt, one tab-separated record per line
    def write2file(self, joke_list):
        if joke_list is None:
            return None
        dir_str = os.path.abspath('.') + "/jokes/"
        print("file save path: %s" % dir_str)
        if not os.path.exists(dir_str):
            os.makedirs(dir_str)
        my_file = None
        try:
            my_file = open(dir_str + "joke.txt", 'w')
            for item in joke_list:
                my_file.write(item[0] + "\t" + item[1] + "\t" +
                              item[2] + "\t" + item[3] + "\t\n")
        except IOError as ex:
            print(ex)
            return None
        finally:
            if my_file is not None:
                my_file.close()


# main: fetch the first page, find the next-page link, parse and save the jokes
url_str = "http://www.qiushibaike.com/text"
print("page to crawl: " + url_str)
my_obj = get_joke(url_str)
webpage = my_obj.getwebpage()
my_obj.getnext_page(webpage)
joke_list = my_obj.getjoke_text(webpage)
my_obj.write2file(joke_list)
```
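
The whole parser hinges on the `re.S` flag: without it, `.` stops at line breaks, and the `.*?` gaps in the pattern could not span the multi-line HTML between the author, content, and stats tags. A small self-contained check of that behaviour, using a made-up two-line fragment rather than real qiushibaike markup:

```python
import re

# a made-up HTML fragment with a newline inside the span, only for illustration
snippet = '<span>first line<br/>\nsecond line</span>'

# without re.S, . cannot cross the newline, so nothing matches
print(re.findall('<span>(.*?)</span>', snippet))        # []
# with re.S, . also matches \n, so the multi-line content is captured
print(re.findall('<span>(.*?)</span>', snippet, re.S))  # ['first line<br/>\nsecond line']
```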
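Note that the script is Python 2 only: `urllib2`, `reload(sys)`, and `sys.setdefaultencoding` do not exist in Python 3, where `urllib2` was folded into `urllib.request`. For reference, a minimal sketch of the same fetch step in Python 3 might look like the following; `fetch_page` is a hypothetical helper name, not part of the original script:

```python
import urllib.request
import urllib.error

def fetch_page(url, user_agent):
    # fetch_page is an illustrative helper, not from the original code
    request = urllib.request.Request(url, headers={"User-Agent": user_agent})
    try:
        # Python 3 strings are unicode, so one decode is enough
        return urllib.request.urlopen(request).read().decode('utf-8')
    except urllib.error.URLError as ex:
        print(getattr(ex, "code", None), getattr(ex, "reason", None))
        return None
```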
item[1]+"/t" + item[2]+"/t" + item[3]+"/t/n")            except IOError,ex:                my_file.close()                print ex                return None            finally:                my_file.close()# 主函數部分url_str = "http://www.qiushibaike.com/text"print("需要采集的網頁為:" + url_str)my_obj = get_joke(url_str)webpage = my_obj.getwebpage()my_obj.getnext_page(webpage)joke_list = my_obj.getjoke_text(webpage)my_obj.write2file(joke_list)3. 結果
