

Example: crawling torrent downloads from web pages with Python's urllib2

2019-11-25 18:30:25

Grabbing torrent seeds with the urllib2 and re modules.

Approach

1. Log into the forum programmatically (for boards that require a login to view); a minimal cookie-handling sketch follows this list.

2. Visit the target board.

3. Iterate over the threads (fetch a given board page, then walk every thread URL on that page).

4. Loop over all thread URLs and pull the torrent download address out of each thread's page source (via regular expressions or a third-party page-parsing library; a parser-based sketch appears after the main listing).

5. Visit the torrent page and download the .torrent file.
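
The main listing below skips step 1. For boards that do require a login, a minimal sketch with cookielib might look like the following; the endpoint login.php and the form field names user and pass are hypothetical placeholders that must be replaced with whatever the real forum's login form uses:

# A minimal login sketch, assuming a plain form-based login; the endpoint
# 'login.php' and the field names 'user'/'pass' are hypothetical placeholders.
import urllib
import urllib2
import cookielib

cookies = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies))
urllib2.install_opener(opener)  # later urllib2.urlopen() calls reuse the session cookies

loginData = urllib.urlencode({'user': 'name', 'pass': 'secret'})
opener.open('http://xxx.yyy.zzz/login.php', loginData)  # POST the credentials

Once the opener is installed, every urllib2.urlopen() call in the listing below carries the login cookies automatically.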

The code is as follows:

import urllib
import urllib2
import cookielib
import re
import sys
import os

# site is the website address | fid is the board id
site = "http://xxx.yyy.zzz/"
source = "thread0806.php?fid=x&search=&page="

btSave = "./clyzwm/"
if os.path.isdir(btSave):
 print btSave + " already exists"
else:
 os.mkdir(btSave)

logfile = "./clyzwm/down.log"
errorfile = "./clyzwm/error.log"
sucfile = "./clyzwm/success.log"

# spoof a browser User-Agent and Referer so the forum serves the pages
headers = {'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36',
           'Referer' : 'http://xxx.yyy.zzz/'}

def btDown(url, dirPath):
 logger(logfile, "download file : " + url)
 try:
  #pageCode = urllib2.urlopen(url).read()
  #print pageCode
  # pull the ref id out of the torrent-page URL
  btStep1 = re.findall('http://[\w]+\.[\w]+\.[\w]{0,4}/[\w]{2,6}\.php\?[\w]{2,6}=([\w]+)', url, re.I)
  #print btStep1
  if len(btStep1) > 0:
   ref = btStep1[0]
   downsite = ""
   downData = {}
   if len(ref) > 20:
    downsite = re.findall('http://www.[\w]+\.[\w]+/', url)[0]
    downsite = downsite + "download.php"
    reff = re.findall('input\stype="hidden"\sname="reff"\svalue="([\w=]+)"', urllib2.urlopen(url).read(), re.I)[0]
    downData = {'ref': ref, 'reff': reff, 'submit': 'download'}
   else:
    downsite = "http://www.downhh.com/download.php"
    downData = {'ref': ref, 'rulesubmit': 'download'}
   #print "bt site - " + downsite + "\n downData:"
   #print downData
   downData = urllib.urlencode(downData)
   downReq = urllib2.Request(downsite, downData)  # attaching data makes this a POST
   downReq.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36')
   downPost = urllib2.urlopen(downReq)
   stream = downPost.read(-1)
   if len(stream) > 1000:  # anything shorter is taken to be an error page, not a torrent
    downPost.close()
    name = btStep1[0] + ".torrent"
    fw = open(dirPath + name, 'wb')  # binary mode: .torrent files are binary data
    fw.write(stream)
    fw.close()
    logger(sucfile, url + "\n")
   else:
    logger(errorfile, url + "\n")
 except urllib2.URLError, e:
  print e.reason

def logger(logfile, msg):
 print msg
 fw = open(logfile, 'a')
 fw.write(msg)
 fw.close()

for i in range(1, 1000):
 logger(logfile, "\n\n\n@ page " + str(i) + " ...")
 part = site + source + str(i)

 content = urllib2.urlopen(part).read()
 content = content.decode('gbk').encode('utf8')  # the forum pages are GBK-encoded
 #print content

 # collect the relative URLs of all threads on this board page
 pages = re.findall('<a\s+href="(htm_data/[\d]+/[\d]+/[\d]+\.html).*?</a>', content, re.I)
 #print pages

 for page in pages:
  page = site + page
  #logger(logfile, "\n# visiting " + page + " ...")
  pageCode = urllib2.urlopen(page).read()
  #print pageCode
  zzJump = re.findall('http://www.viidii.info/\?http://[\w]+/[\w]+\?[\w]{2,6}=[\w]+', pageCode)
  #zzJump = re.findall('http://www.viidii.info/\?http://[\w/\?=]*', pageCode)
  if len(zzJump) > 0:
   zzJump = zzJump[0]
   #print "- jump page - " + zzJump
   pageCode = urllib2.urlopen(zzJump).read()  # follow the jump page to reach the torrent link
   zzPage = re.findall('http://[\w]+\.[\w]+\.[\w]+/link[\w]?\.php\?[\w]{2,6}=[\w]+', pageCode)
   if len(zzPage) > 0:
    zzPage = zzPage[0]
    logger(logfile, "\n- zhongzi page -" + zzPage)
    btDown(zzPage, btSave)
   else:
    logger(logfile, "\n. NOT FOUND .")
  else:
   logger(logfile, "\n... NOT FOUND ...")
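
Step 4 mentioned that a third-party page-parsing library can replace the regular expressions. As a comparison, here is a minimal sketch of the thread-link extraction using BeautifulSoup, assuming the bs4 package is installed; it matches the same htm_data/... hrefs that the re.findall() call above captures:

# Parser-based alternative to the regex thread extraction; assumes bs4 is installed.
import re
import urllib2
from bs4 import BeautifulSoup

site = "http://xxx.yyy.zzz/"                    # same placeholders as above
source = "thread0806.php?fid=x&search=&page="

content = urllib2.urlopen(site + source + "1").read().decode('gbk')
soup = BeautifulSoup(content, 'html.parser')
# every anchor whose href points at a thread page
for a in soup.find_all('a', href=re.compile(r'^htm_data/')):
    print site + a['href']

A parser is more tolerant of attribute order and whitespace than a hand-written regex, at the cost of an extra dependency.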
