国产探花免费观看_亚洲丰满少妇自慰呻吟_97日韩有码在线_资源在线日韩欧美_一区二区精品毛片,辰东完美世界有声小说,欢乐颂第一季,yy玄幻小说排行榜完本

首頁 > 編程 > Python > 正文

Python實現的爬蟲功能代碼

2020-01-04 16:54:06
字體:
來源:轉載
供稿:網友

本文實例講述了Python實現的爬蟲功能。分享給大家供大家參考,具體如下:

主要用到 urllib2、BeautifulSoup 模塊(注意:urllib2 僅存在於 Python 2,本文代碼需在 Python 2 環境下運行)

#encoding=utf-8import reimport requestsimport urllib2import datetimeimport MySQLdbfrom bs4 import BeautifulSoupimport sysreload(sys)sys.setdefaultencoding("utf-8")class Splider(object):  def __init__(self):  print u'開始爬取內容...'  ##用來獲取網頁源代碼  def getsource(self,url):  headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2652.0 Safari/537.36'}  req = urllib2.Request(url=url,headers=headers)  socket = urllib2.urlopen(req)  content = socket.read()  socket.close()  return content  ##changepage用來生產不同頁數的鏈接  def changepage(self,url,total_page):    now_page = int(re.search('page/(/d+)',url,re.S).group(1))  page_group = []  for i in range(now_page,total_page+1):    link = re.sub('page/(/d+)','page/%d' % i,url,re.S)    page_group.append(link)  return page_group  #獲取字內容  def getchildrencon(self,child_url):  conobj = {}  content = self.getsource(child_url)  soup = BeautifulSoup(content, 'html.parser', from_encoding='utf-8')  content = soup.find('div',{'class':'c-article_content'})  img = re.findall('src="(.*?)"',str(content),re.S)  conobj['con'] = content.get_text()  conobj['img'] = (';').join(img)  return conobj  ##獲取內容  def getcontent(self,html_doc):  soup = BeautifulSoup(html_doc, 'html.parser', from_encoding='utf-8')  tag = soup.find_all('div',{'class':'promo-feed-headline'})  info = {}  i = 0  for link in tag:    info[i] = {}    title_desc = link.find('h3')    info[i]['title'] = title_desc.get_text()    post_date = link.find('div',{'class':'post-date'})    pos_d = post_date['data-date'][0:10]    info[i]['content_time'] = pos_d    info[i]['source'] = 'whowhatwear'    source_link = link.find('a',href=re.compile(r"section=fashion-trends"))    source_url = 'http://www.whowhatwear.com'+source_link['href']    info[i]['source_url'] = source_url    in_content = self.getsource(source_url)    in_soup = BeautifulSoup(in_content, 'html.parser', from_encoding='utf-8')    soup_content = 
in_soup.find('section',{'class':'widgets-list-content'})    info[i]['content'] = soup_content.get_text().strip('/n')    text_con = in_soup.find('section',{'class':'text'})    summary = text_con.get_text().strip('/n') if text_con.text != None else NULL    info[i]['summary'] = summary[0:200]+'...';    img_list = re.findall('src="(.*?)"',str(soup_content),re.S)    info[i]['imgs'] = (';').join(img_list)    info[i]['create_time'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")    i+=1  #print info  #exit()  return info  def saveinfo(self,content_info):  conn = MySQLdb.Connect(host='127.0.0.1',user='root',passwd='123456',port=3306,db='test',charset='utf8')  cursor = conn.cursor()  for each in content_info:    for k,v in each.items():    sql = "insert into t_fashion_spider2(`title`,`summary`,`content`,`content_time`,`imgs`,`source`,`source_url`,`create_time`) values ('%s','%s','%s','%s','%s','%s','%s','%s')" % (MySQLdb.escape_string(v['title']),MySQLdb.escape_string(v['summary']),MySQLdb.escape_string(v['content']),v['content_time'],v['imgs'],v['source'],v['source_url'],v['create_time'])    cursor.execute(sql)  conn.commit()  cursor.close()  conn.close()if __name__ == '__main__':  classinfo = []  p_num = 5  url = 'http://www.whowhatwear.com/section/fashion-trends/page/1'  jikesplider = Splider()  all_links = jikesplider.changepage(url,p_num)  for link in all_links:  print u'正在處理頁面:' + link  html = jikesplider.getsource(link)  info = jikesplider.getcontent(html)  classinfo.append(info)  jikesplider.saveinfo(classinfo)

 

希望本文所述對大家Python程序設計有所幫助。

發表評論 共有條評論
用戶名: 密碼:
驗證碼: 匿名發表
主站蜘蛛池模板: 平顺县| 昂仁县| 岐山县| 墨竹工卡县| 丘北县| 吴江市| 新乡市| 洛南县| 利津县| 天气| 乳山市| 平潭县| 八宿县| 鲜城| 合水县| 淮南市| 锡林郭勒盟| 海伦市| 阳东县| 武宣县| 西昌市| 荥经县| 卫辉市| 库尔勒市| 庆城县| 崇义县| 陈巴尔虎旗| 东乌| 忻城县| 万盛区| 临漳县| 全南县| 屏东县| 河津市| 永和县| 彭阳县| 明溪县| 益阳市| 景宁| 凤阳县| 林州市|