

A Python-based crawler for scraping all movies from Tencent Video

2019-11-25 16:49:48
來(lái)源:轉(zhuǎn)載
供稿:網(wǎng)友

我搜集了國(guó)內(nèi)10幾個(gè)電影網(wǎng)站的數(shù)據(jù),里面近幾十W條記錄,用文本沒(méi)法存,mongodb學(xué)習(xí)成本非常低,安裝、下載、運(yùn)行起來(lái)不會(huì)花你5分鐘時(shí)間。

# -*- coding: utf-8 -*-
# by awakenjoys. my site: www.dianying.at
import re
import urllib2
from bs4 import BeautifulSoup
import string, time
import pymongo

NUM    = 0      # global: number of movies scraped so far
m_type = u''    # global: current movie genre
m_site = u'qq'  # global: movie site

# Fetch the page content for the given URL
def gethtml(url):
    req = urllib2.Request(url)
    response = urllib2.urlopen(req)
    html = response.read()
    return html

# Extract the movie genres from the genre list page
def gettags(html):
    global m_type
    soup = BeautifulSoup(html)

    # Filter out the genre section
    #print soup
    # <ul class="clearfix _group" gname="mi_type" gtype="1">
    tags_all = soup.find_all('ul', {'class' : 'clearfix _group', 'gname' : 'mi_type'})
    #print len(tags_all), tags_all
    #print str(tags_all[1]).replace('\n', '')

    # <a _hot="tag.sub" class="_gtag _hotkey" title="動作" tvalue="0">動作</a>
    re_tags = r'<a _hot=\"tag\.sub\" class=\"_gtag _hotkey\" href=\"(.+?)\" title=\"(.+?)\" tvalue=\"(.+?)\">.+?</a>'
    p = re.compile(re_tags, re.DOTALL)

    tags = p.findall(str(tags_all[0]))
    if tags:
        tags_url = {}
        #print tags
        for tag in tags:
            tag_url = tag[0].decode('utf-8')
            #print tag_url
            m_type = tag[1].decode('utf-8')
            tags_url[m_type] = tag_url
    else:
        print "Not Find"
    return tags_url

# Get the number of pages for each genre
def get_pages(tag_url):
    tag_html = gethtml(tag_url)
    # div class="paginator
    soup = BeautifulSoup(tag_html)

    # Filter out the pagination html
    #print soup
    # <div class="mod_pagenav" id="pager">
    div_page = soup.find_all('div', {'class' : 'mod_pagenav', 'id' : 'pager'})
    #print div_page #len(div_page), div_page[0]

    # <a class="c_txt6" title="25"><span>25</span></a>
    re_pages = r'<a class=.+?><span>(.+?)</span></a>'
    p = re.compile(re_pages, re.DOTALL)
    pages = p.findall(str(div_page[0]))
    #print pages
    if len(pages) > 1:
        return pages[-2]
    else:
        return 1

def getmovielist(html):
    soup = BeautifulSoup(html)

    # <ul class="mod_list_pic_130">
    divs = soup.find_all('ul', {'class' : 'mod_list_pic_130'})
    #print divs
    for div_html in divs:
        div_html = str(div_html).replace('\n', '')
        #print div_html
        getmovie(div_html)

def getmovie(html):
    global NUM
    global m_type
    global m_site

    # Sample of the markup this regex is pulled from:
    # <h6 class="caption"> <a target="_blank" title="徒步旅行隊">徒步旅行隊</a> </h6>
    # <ul class="info"> <li class="desc">法國賣座喜劇片</li> <li class="cast"> </li> </ul> </div>
    # <div class="ext ext_last"> <div class="ext_txt"> <h3 class="ext_title">徒步旅行隊</h3>
    # <div class="ext_info"> <span class="ext_area">地區: 法國</span> <span class="ext_cast">導演: </span>
    # <span class="ext_date">年代: 2009</span> <span class="ext_type">類型: 喜劇</span> </div>
    # <p class="ext_intro">理查德·達奇擁有一家小的旅游公司,主要經營法國游客到非洲大草原的旅游服務。六個法國游客決定參加理查德·達奇組織的到非洲的一...</p>

    re_movie = r'<li><a class=\"mod_poster_130\" href=\"(.+?)\" target=\"_blank\" title=\"(.+?)\"><img.+?</li>'
    p = re.compile(re_movie, re.DOTALL)
    movies = p.findall(html)
    if movies:
        conn = pymongo.Connection('localhost', 27017)
        movie_db = conn.dianying
        playlinks = movie_db.playlinks
        #print movies
        for movie in movies:
            #print movie
            NUM += 1
            print "%s : %d" % ("=" * 70, NUM)
            values = dict(
                movie_title = movie[1],
                movie_url   = movie[0],
                movie_site  = m_site,
                movie_type  = m_type
                )
            print values
            playlinks.insert(values)
            print "_" * 70
            NUM += 1
            print "%s : %d" % ("=" * 70, NUM)
    #else:
    #    print "Not Find"

def getmovieinfo(url):
    html = gethtml(url)
    soup = BeautifulSoup(html)

    # pack pack_album album_cover
    divs = soup.find_all('div', {'class' : 'pack pack_album album_cover'})
    #print divs[0]

    # <a target="new" title="《血滴子》獨家紀錄片" wl="1"> </a>
    re_info = r'<a href=\"(.+?)\" target=\"new\" title=\"(.+?)\" wl=\".+?\"> </a>'
    p_info = re.compile(re_info, re.DOTALL)
    m_info = p_info.findall(str(divs[0]))
    if m_info:
        return m_info
    else:
        print "Not find movie info"

    return m_info

def insertdb(movieinfo):
    global conn
    movie_db = conn.dianying_at
    movies = movie_db.movies
    movies.insert(movieinfo)

if __name__ == "__main__":
    global conn

    tags_url = "http://v.qq.com/list/1_-1_-1_-1_1_0_0_20_0_-1_0.html"
    #print tags_url
    tags_html = gethtml(tags_url)
    #print tags_html
    tag_urls = gettags(tags_html)
    #print tag_urls

    for url in tag_urls.items():
        print str(url[1]).encode('utf-8') #,url[0]
        maxpage = int(get_pages(str(url[1]).encode('utf-8')))
        print maxpage

        for x in range(0, maxpage):
            # http://v.qq.com/list/1_0_-1_-1_1_0_0_20_0_-1_0.html
            m_url = str(url[1]).replace('0_20_0_-1_0.html', '')
            movie_url = "%s%d_20_0_-1_0.html" % (m_url, x)
            print movie_url
            movie_html = gethtml(movie_url.encode('utf-8'))
            #print movie_html
            getmovielist(movie_html)
            time.sleep(0.1)

發(fā)表評(píng)論 共有條評(píng)論
用戶(hù)名: 密碼:
驗(yàn)證碼: 匿名發(fā)表
主站蜘蛛池模板: 和平区| 石林| 武川县| 保德县| 福建省| 雷山县| 鹤峰县| 海盐县| 甘南县| 南和县| 东山县| 威海市| 九龙县| 卢龙县| 武定县| 宝坻区| 嘉峪关市| 讷河市| 佛学| 德保县| 平阳县| 柯坪县| 涟源市| 额尔古纳市| 淮南市| 天峻县| 乌拉特后旗| 东方市| 哈巴河县| 清水河县| 饶河县| 卢龙县| 德昌县| 库尔勒市| 岳普湖县| 安岳县| 彩票| 宜春市| 明星| 厦门市| 靖江市|