

How to scrape Tmall product details and transaction records with Python


This article shares working Python code for scraping Tmall product details and transaction records, for your reference. The details are as follows.

1. Setting up the Python environment

This post uses Python 2.7.
Modules involved: spynner, scrapy, bs4, pymssql
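As a quick sanity check, here is a minimal sketch that simply imports the four modules and opens a connection to the SQL Server database used later. The server, login and database name (localhost, sa/123456, TmallData) are the ones hard-coded in the source in section 4, so adjust them to your own environment.

# Environment check for Python 2.7: verify that the required modules import
# and that the SQL Server database used by the crawler is reachable.
import spynner
from scrapy.selector import Selector
from bs4 import BeautifulSoup
import pymssql

conn = pymssql.connect("localhost", "sa", "123456", "TmallData")
cursor = conn.cursor()
cursor.execute("select 1")
print "Environment OK:", cursor.fetchone()
conn.close()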

2. Tmall data to collect

For each product, the script collects the title, list price, promotional price, postage, stock, number of favourites, review count and monthly sales, plus the product attribute list from the description tab. It also collects the product's transaction records: buyer, style/SKU, quantity, deal date and deal time.

3. Data scraping workflow

The script reads the product URLs to crawl from a ProductURLs table in SQL Server, renders each product page with spynner (a programmatic browser built on QtWebKit), and pulls the fields above out of the rendered HTML with scrapy XPath selectors. It then clicks the description tab to read the product attributes, clicks the deal-record tab and pages through the transaction list, parsing each page with BeautifulSoup. Results are written back to SQL Server through pymssql. A minimal sketch of the load-and-extract pattern is shown below; the full source follows in section 4.
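The item URL in this sketch is only a placeholder (it is the example commented out in the full source); the real script builds its URLs from the ProductURLs table instead.

# Minimal sketch: spynner renders the item page (including its JavaScript),
# then scrapy's Selector runs an XPath query over the rendered HTML.
import spynner
from scrapy.selector import Selector

browser = spynner.Browser()
# Placeholder URL; the full script reads URLs from the ProductURLs table.
browser.load('https://detail.tmall.com/item.htm?id=524425656711', load_timeout=60)
browser.wait(10)

# Same XPath the full script uses for the product title.
title = Selector(text=browser.html).xpath(
  '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/div[1]/h1/text()').extract()
print title[0] if title else "title not found"
browser.close()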

4. Source code

 

#coding:utf-8
import spynner
from scrapy.selector import Selector
from bs4 import BeautifulSoup
import random
import pymssql

#------------------------ connect to the database ------------------------#
server = "localhost"
user = "sa"
password = "123456"
conn = pymssql.connect(server, user, password, "TmallData")
if conn:
  print "DataBase connecting successfully!"
else:
  print "DataBase connecting error!"
cursor = conn.cursor()

#---------------------- page interaction helpers ----------------------#
def py_click_element(browser, pos):
  # Click an element on the page.
  # pos example: 'a[href="#description"]'
  browser.click(pos)
  browser.wait(random.randint(3, 10))
  return browser

def py_click_xpath(browser, xpath):
  # Resolve the link's href via XPath, then click it through a CSS selector.
  xpath = xpath + '/@href'
  inner_href = Selector(text=browser.html).xpath(xpath).extract()
  pos = 'a[href="' + str(inner_href[0]) + '"]'
  browser = py_click_element(browser, pos)
  return browser

def py_webpage_load(browser, url):
  browser.load(url, load_timeout=60)
  browser.wait(10)
  return browser

def py_check_element(browser, xpath):
  # Look up the element by XPath; return True if it exists, False otherwise.
  if Selector(text=browser.html).xpath(xpath).extract() != []:
    return True
  else:
    return False

def py_extract_xpath(browser, xpath):
  if py_check_element(browser, xpath):
    return Selector(text=browser.html).xpath(xpath).extract()[0]
  else:
    return "none"

def py_extract_xpaths(browser, xpaths):
  # Extract several XPath expressions from the current page in one pass.
  length = len(xpaths)
  results = [0] * length
  for i in range(length):
    results[i] = py_extract_xpath(browser, xpaths[i])
  return results

#----------------------------- database helpers -----------------------------#
#----------------------------- data extraction helpers -----------------------------#
def py_getDealReord(doc):
  # Parse one page of the deal-record table into
  # [buyer, style, quantity, deal date, deal time] rows.
  soup = BeautifulSoup(doc, 'lxml')
  tr = soup.find_all('tr')
  total_dealRecord = [([0] * 5) for i in range(len(tr))]
  i = -1
  for this_tr in tr:
    i = i + 1
    td_user = this_tr.find_all('td', attrs={'class': "cell-align-l buyer"})
    for this_td in td_user:
      total_dealRecord[i][0] = this_td.getText().strip(' ')
      #print username
    td_style = this_tr.find_all('td', attrs={'class': "cell-align-l style"})
    for this_td in td_style:
      total_dealRecord[i][1] = this_td.getText(',').strip(' ')
      #print style
    td_quantity = this_tr.find_all('td', attrs={'class': "quantity"})
    for this_td in td_quantity:
      total_dealRecord[i][2] = this_td.getText().strip(' ')
      #print quantity
    td_dealtime = this_tr.find_all('td', attrs={'class': "dealtime"})
    for this_td in td_dealtime:
      total_dealRecord[i][3] = this_td.find('p', attrs={'class': "date"}).getText()
      total_dealRecord[i][4] = this_td.find('p', attrs={'class': "time"}).getText()
  return total_dealRecord

#-------------------- fetch all product links to crawl --------------------#
cursor.execute("""select * from ProductURLs where BrandName='NB'""")
file = open("H:\\Eclipse\\TmallCrawling\\HTMLParse\\errLog.txt", 'a')  # log of URLs that failed to load
InProductInfo = cursor.fetchall()
browser = spynner.Browser()
for temp_InProductInfo in InProductInfo:
  url = 'https:' + temp_InProductInfo[2]
  BrandName = temp_InProductInfo[0]
  ProductType = temp_InProductInfo[1]
  print BrandName, '\t', ProductType, '\t', url

  #url = 'https://detail.tmall.com/item.htm?id=524425656711&rn=77636d6db8dea5e30060976fdaf9768d&abbucket=19'

  try:
    browser = py_webpage_load(browser, url)
  except:
    print "Loading webpage failed."
    file.write(url)
    file.write('\n')
    continue

  # Product-level fields: promo price, list price, title, postage, stock,
  # favourites count, review count, monthly sales.
  xpaths = ['//*[@id="J_PromoPrice"]/dd/div/span/text()',
    '//*[@id="J_StrPriceModBox"]/dd/span/text()',
    '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/div[1]/h1/text()',
    '//*[@id="J_PostageToggleCont"]/p/span/text()',
    '//*[@id="J_EmStock"]/text()',
    '//*[@id="J_CollectCount"]/text()',
    '//*[@id="J_ItemRates"]/div/span[2]/text()',
    '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/ul/li[1]/div/span[2]/text()']
  out_ProductInfo = py_extract_xpaths(browser, xpaths)

  # Open the description tab and collect the product attribute list.
  browser = py_click_element(browser, 'a[href="#description"]')
  ProductProperty = py_extract_xpath(browser, '//*[@id="J_AttrUL"]')
  soup = BeautifulSoup(ProductProperty, 'lxml')
  li = soup.find_all('li')
  prop = ''
  for this_li in li:
    prop = prop + this_li.getText() + '\\'
  prop = prop[0:len(prop) - 1]
  out_ProductProperty = prop
  print out_ProductProperty

  cursor.execute("""
  Insert into py_ProductInfo values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
  """, (BrandName, ProductType, url,
     out_ProductInfo[2], out_ProductInfo[1],
     out_ProductInfo[0], out_ProductInfo[7],
     out_ProductInfo[1], out_ProductInfo[3],
     out_ProductInfo[4], out_ProductInfo[5],
     out_ProductProperty))
  conn.commit()

  # Open the deal-record tab and save the first page of transactions.
  Deal_PageCount = 0
  browser = py_click_element(browser, 'a[href="#J_DealRecord"]')
  #browser.browse(True)
  DealRecord = py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
  out_DealRecord = py_getDealReord(DealRecord)
  for temp_DealRecord in out_DealRecord:
    if str(temp_DealRecord[4]) == '0':
      continue
    cursor.execute("""
    Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
    """, (url, temp_DealRecord[0], temp_DealRecord[1],
       temp_DealRecord[2], temp_DealRecord[3],
       temp_DealRecord[4]))
    conn.commit()
  Deal_PageCount = Deal_PageCount + 1
  print "Page ", Deal_PageCount

  # Follow the numbered pagination links of the deal-record list.
  for i in range(6):
    if (i == 0) or (i == 2):
      continue
    xpath = '//*[@id="J_showBuyerList"]/div/div/a[' + str(i) + ']'
    if py_check_element(browser, xpath):
      browser = py_click_xpath(browser, xpath)
      DealRecord = py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
      out_DealRecord = py_getDealReord(DealRecord)
      for temp_DealRecord in out_DealRecord:
        if str(temp_DealRecord[4]) == '0':
          continue
        cursor.execute("""
        Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
        """, (url, temp_DealRecord[0], temp_DealRecord[1],
           temp_DealRecord[2], temp_DealRecord[3],
           temp_DealRecord[4]))
        conn.commit()
      Deal_PageCount = Deal_PageCount + 1
      print "Page ", Deal_PageCount

  # Keep clicking the "next page" link (a[6]) until it disappears.
  while py_check_element(browser, '//*[@id="J_showBuyerList"]/div/div/a[6]'):
    browser = py_click_xpath(browser, '//*[@id="J_showBuyerList"]/div/div/a[6]')
    DealRecord = py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
    out_DealRecord = py_getDealReord(DealRecord)
    for temp_DealRecord in out_DealRecord:
      if str(temp_DealRecord[4]) == '0':
        continue
      cursor.execute("""
      Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
      """, (url, temp_DealRecord[0], temp_DealRecord[1],
         temp_DealRecord[2], temp_DealRecord[3],
         temp_DealRecord[4]))
      conn.commit()
    Deal_PageCount = Deal_PageCount + 1
    print "Page ", Deal_PageCount
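The script assumes three tables already exist in the TmallData database: ProductURLs (the crawl queue, filtered by BrandName), py_ProductInfo (12 values per product) and DealRecord (6 values per transaction). The article does not give their definitions, so the DDL below is only a guessed, compatible schema: the column names and types are invented, and only the column counts and order follow the INSERT statements above.

# Hypothetical table definitions matching the INSERT statements in the crawler.
# Column names and types are guesses; adjust them to your own data.
import pymssql

conn = pymssql.connect("localhost", "sa", "123456", "TmallData")
cur = conn.cursor()
cur.execute("""
CREATE TABLE ProductURLs (
  BrandName   NVARCHAR(100),
  ProductType NVARCHAR(100),
  URL         NVARCHAR(500)
);
CREATE TABLE py_ProductInfo (
  BrandName NVARCHAR(100), ProductType NVARCHAR(100), URL NVARCHAR(500),
  Title NVARCHAR(500), ListPrice NVARCHAR(50), PromoPrice NVARCHAR(50),
  MonthlySales NVARCHAR(50), ListPrice2 NVARCHAR(50), Postage NVARCHAR(100),
  Stock NVARCHAR(50), CollectCount NVARCHAR(50), ProductProperty NVARCHAR(MAX)
);
CREATE TABLE DealRecord (
  URL NVARCHAR(500), Buyer NVARCHAR(100), Style NVARCHAR(500),
  Quantity NVARCHAR(50), DealDate NVARCHAR(50), DealTime NVARCHAR(50)
);
""")
conn.commit()
conn.close()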

That's all for this article. I hope it helps with your learning, and please keep supporting VEVB武林網(wǎng).

