国产探花免费观看_亚洲丰满少妇自慰呻吟_97日韩有码在线_资源在线日韩欧美_一区二区精品毛片,辰东完美世界有声小说,欢乐颂第一季,yy玄幻小说排行榜完本

首頁 > 編程 > Python > 正文

python搭建微信公眾平臺

2019-11-25 16:56:45
字體:
供稿:網友

python基於新浪sae開發的微信公眾平臺,實現功能:

輸入段子---回復笑話
輸入開源+文章---發送消息到開源中國
輸入快遞+訂單號---查詢快遞信息
輸入天氣---查詢南京最近五天天氣狀況
輸入微博熱點---回復微博當前熱門話題
輸入電影+名稱---回復百度云盤中搜索的鏈接

具體實現代碼:

# -*- coding: utf-8 -*-
"""WeChat official-account backend for Sina SAE (Python 2 / web.py).

``GET`` implements the WeChat endpoint-verification handshake; ``POST``
receives a user message and dispatches on its text to a feature helper:
weather forecast, Baidu-pan movie search, jokes, publishing to OSChina,
parcel tracking, Weibo hot topics, Zhihu notifications, and a chatbot
fallback.  Replies are rendered through ``templates/reply_text``.
"""
import hashlib
import web
import lxml
import time
import os
import urllib2, json
import urllib
import re
import random
import cookielib
from urllib import urlencode
from lxml import etree

# Browser-like headers shared by the scraping requests below.
_IE_HEADERS = {
    'Connection': 'Keep-Alive',
    'Accept': 'text/html, application/xhtml+xml, */*',
    'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'}
_FIREFOX_UA = ('User-agent',
               'Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0 Iceweasel/38.3.0')


class WeixinInterface:
    """web.py request handler bound to the WeChat callback URL."""

    def __init__(self):
        # Templates live in ./templates next to this file.
        self.app_root = os.path.dirname(__file__)
        self.templates_root = os.path.join(self.app_root, 'templates')
        self.render = web.template.render(self.templates_root)

    def GET(self):
        """WeChat server verification.

        WeChat calls this with signature/timestamp/nonce/echostr; echoing
        ``echostr`` back proves token ownership.  The signature is the SHA1
        of the lexicographically sorted token/timestamp/nonce triple.
        """
        data = web.input()
        token = "weixin9047"  # must match the token configured in the WeChat console
        params = [token, data.timestamp, data.nonce]
        params.sort()
        sha1 = hashlib.sha1()
        for part in params:
            sha1.update(part)
        # Only a request genuinely signed by WeChat gets the echo back.
        if sha1.hexdigest() == data.signature:
            return data.echostr

    def POST(self):
        """Parse an incoming message and route it to a feature helper.

        Returns the rendered reply-XML for the WeChat server.
        """
        xml = etree.fromstring(web.data())  # raw POST body is message XML
        content = xml.find("Content").text
        fromUser = xml.find("FromUserName").text
        toUser = xml.find("ToUserName").text

        if content == u"天氣":
            reply = self._weather()
        elif content[0:2] == u"電影":
            reply = self._movie(content[2:])
        elif u"段子" in content:
            reply = self._joke()
        elif content[0:2] == u"開源":
            reply = self._oschina(content[2:])
        elif content[0:2] == u"快遞":
            reply = self._express(content[2:])
        elif content == u"微博熱點":
            reply = self._weibo_hot()
        elif content == u"知乎信息":
            reply = self._zhihu()
        elif u"鐘志遠" in content:
            reply = u"你想找全世界最帥的人干嘛?如果你是妹子,請加微信18362983803!漢子繞道!"
        elif u"使用" in content:
            reply = u"搜電影:電影+電影名,最近天氣:天氣,微博熱門:微博熱點,快遞查詢:快遞+單號,看笑話:段子,發送動彈到開源中國:開源+內容"
        else:
            reply = self._chatbot(content)
        return self.render.reply_text(fromUser, toUser, int(time.time()), reply)

    def _weather(self):
        """Scrape ip138 for Nanjing's five-day forecast."""
        req = urllib2.Request("http://m.ip138.com/21/nanjing/tianqi/", headers=_IE_HEADERS)
        html = urllib2.urlopen(req).read()
        # Weather condition = alt text of the per-day icon; temperature from
        # the matching <div class="temperature"> element.
        conditions = re.findall(r'(?<=img src="/image/s[0-9].gif" alt=").{1,6}(?=" />)', html)
        temperatures = re.findall(r'(?<=div class="temperature">).{5,15}(?=</div>)', html)
        lines = ""
        for temp, cond in zip(temperatures, conditions):
            lines = lines + cond + "   " + temp + "\n"
        return "最近五天天氣:\n" + lines

    def _movie(self, name):
        """Search wangpansou for *name* and return matching Baidu-pan links."""
        keyword = urllib.quote(name.encode("utf-8"))
        req = urllib2.Request("http://www.wangpansou.cn/s.php?q=" + keyword,
                              headers=_IE_HEADERS)
        html = urllib2.urlopen(req).read()
        # NOTE(review): the scraped original read '[/d+?]' — almost certainly
        # '[\d+?]' before backslashes were mangled; restored here.
        links = re.findall(r'https?://pan.baidu.com.*/?uk=[0-9]{10}.*[\d+?]"', html)
        string = u""
        for link in links:
            string = string + link + "\n"
        return u"以下是電影鏈接:\n" + string

    def _joke(self):
        """Return one random joke from qiushibaike's front and 'hot' pages."""
        jokes = []
        for url in ("http://www.qiushibaike.com/", "http://www.qiushibaike.com/hot/"):
            req = urllib2.Request(url, headers=_IE_HEADERS)
            html = urllib2.urlopen(req).read()
            jokes.extend(re.findall(r'(?<=div class="content">).*?(?=<!--)', html, re.S))
        random.shuffle(jokes)
        return jokes[0].replace('<br/>', '')

    def _oschina(self, text):
        """Log in to OSChina and publish *text* as a tweet (動彈)."""
        login_url = "https://www.oschina.net/action/user/hash_login"
        pub_url = "http://www.oschina.net/action/tweet/pub"
        username = "904727147@qq.com"
        passw = ""  # SECURITY(review): credentials belong in config, not source
        password = hashlib.sha1(passw).hexdigest()
        cj = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        opener.addheaders = [_FIREFOX_UA]
        urllib2.install_opener(opener)
        # Session cookie from the login is reused by the publish request.
        opener.open(login_url, urllib.urlencode({'email': username, 'pwd': password}))
        post = {'user_code': "lPFz26r3ZIa1e3KyIWlzPNpJlaEmZqyh6dAWAotd",
                'user': "2391943",
                'msg': text.encode("utf-8")}
        urllib2.urlopen(pub_url, urllib.urlencode(post)).read()
        return u"發送到開源中國動彈成功!"

    def _express(self, order_no):
        """Track a parcel via kuaidi100; auto-detects the carrier first."""
        cj = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        opener.addheaders = [_FIREFOX_UA]
        urllib2.install_opener(opener)
        html = urllib2.urlopen(
            "http://www.kuaidi100.com/autonumber/autoComNum?text=" + order_no).read()
        jo = json.loads(html)
        auto = jo.get("auto")
        # Guard: the original indexed jo["auto"][0] unconditionally and
        # crashed (IndexError) when no carrier matched the number.
        if not auto or auto[0].get('comCode') is None:
            return u"請檢查你的定單號!"
        typ = auto[0]['comCode']
        jo_end = json.loads(urllib2.urlopen(
            "http://www.kuaidi100.com/query?type=" + typ + "&postid=" + order_no).read())
        if jo_end["status"] == "201":
            return u"訂單號輸入有誤,請重新輸入!"
        string = u""
        for item in jo_end["data"]:
            string = string + item["time"] + item["context"] + "\n"
        return string

    def _weibo_hot(self):
        """Scrape weibo.cn's public page for current hot topics."""
        req = urllib2.Request("http://weibo.cn/pub/?tf=5_005", headers=_IE_HEADERS)
        html = urllib2.urlopen(req).read().decode("utf-8")
        topics = re.findall(r'(?<=div class="c"><a href=").{60,79}(?=</a>)', html)
        string = u""
        for topic in topics:
            string = string + topic.replace('>', '\n') + "\n"
        return string.replace('"', '')

    def _zhihu(self):
        """Log in to Zhihu and report new comment/follower/upvote counts."""
        username = '18362983803'
        password = ''  # SECURITY(review): credentials belong in config, not source
        _xsrf = '558c1b60725377c5810ae2484b26781e'  # NOTE(review): stale CSRF token — verify
        cj = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        opener.addheaders = [_FIREFOX_UA]
        data = urllib.urlencode(
            {"phone_num": username, "password": password, '_xsrf': _xsrf})
        opener.open(r'https://www.zhihu.com/login/phone_num', data)
        html = opener.open('https://www.zhihu.com/noti7/new?r=1454793308655').read()
        counts = json.loads(html)[1]
        return ("增長了:" + str(counts[0]) + "個評論"
                + str(counts[1]) + "個粉絲" + str(counts[2]) + "個贊同")

    def _chatbot(self, content):
        """Fallback: relay the message to the xiaohuangji chatbot."""
        cj = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        opener.addheaders = [_FIREFOX_UA]
        try:
            data = urllib.urlencode({"para": urllib.quote(content.encode("utf-8"))})
            html = opener.open('http://www.xiaohuangji.com/ajax.php', data).read()
            return html + "\n----[回復[使用]]"
        except Exception:
            # Best-effort feature: any upstream failure degrades to a canned reply.
            return u"我不想理你了~"

以上就是本文的全部內容,希望對大家的學習有所幫助。

發(fā)表評論 共有條評論
用戶名: 密碼:
驗證碼: 匿名發(fā)表
主站蜘蛛池模板: 卫辉市| 通州市| 延寿县| 衡阳县| 文昌市| 安福县| 定兴县| 彝良县| 罗城| 通江县| 阿坝| 福贡县| 洪江市| 聊城市| 东方市| 同德县| 那坡县| 海城市| 天津市| 平顶山市| 通州市| 大姚县| 任丘市| 福清市| 新巴尔虎左旗| 长汀县| 资兴市| 天台县| 芦溪县| 柳州市| 呼伦贝尔市| 儋州市| 旬邑县| 赣榆县| 通许县| 湖南省| 岑溪市| 高清| 莎车县| 松原市| 南郑县|