This article walks through a WeChat article crawler implemented in Python, shared here for reference. The details are as follows.
Single-threaded version:
import urllib.request
import urllib.parse
import urllib.error
import re, time

headers = ("User-Agent",
           "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3107.4 Safari/537.36")
operner = urllib.request.build_opener()
operner.addheaders = [headers]
urllib.request.install_opener(operner)
list_url = []

# Fetch the page content of a URL (optionally through a proxy)
def use_proxy(url):
    try:
        # proxy = urllib.request.ProxyHandler({'http': proxy_addr})  # proxy version
        # operner = urllib.request.build_opener(proxy)
        # urllib.request.install_opener(operner)
        headers = ("User-Agent",
                   "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3107.4 Safari/537.36")
        operner = urllib.request.build_opener()
        operner.addheaders = [headers]
        urllib.request.install_opener(operner)
        data = urllib.request.urlopen(url).read().decode('utf-8')
        # print(data)
        return data
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        elif hasattr(e, "reason"):
            print(e.reason)
    except Exception as e:
        print("exception" + str(e))
        time.sleep(1)

# Collect the article URLs to crawl from the Sogou WeChat search result pages
def get_url(key, pagestart, pageend):
    try:
        keycode = urllib.parse.quote(key)
        for page in range(pagestart, pageend + 1):
            url = "http://weixin.sogou.com/weixin?type=2&query=%s&page=%s" % (keycode, page)
            data1 = use_proxy(url)
            # print("data1 content:", data1)
            listurl_pattern = '<h3>.*?("http://.*?)</h3>'  # pick the article link out of each result block
            result = re.compile(listurl_pattern, re.S).findall(data1)
            for i in range(len(result)):
                res = result[i].replace("amp;", "").split(" ")[0].replace("\"", "")
                list_url.append(res)
        # print(list_url)
        return list_url
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        elif hasattr(e, "reason"):
            print(e.reason)
    except Exception as e:
        print("exception:", e)

# Crawl each collected URL, extract the title and body, and append them to an HTML file
def get_url_content(list_url):
    fh1 = open("D:\\python-script\\1.html", 'wb')
    html1 = '''<!DOCTYPE html>\n<html xmlns="http://www.w3.org/1999/xhtml">\n<head>\n<meta http-equiv="Content-Type" content="text/html; charset=utf-8">\n<title>微信文章</title></head>\n<body>'''
    fh1.write(html1.encode("utf-8"))
    fh1.close()
    fh = open("D:\\python-script\\1.html", 'ab')
    for url in list_url:
        data_content = use_proxy(url)
        # print(data_content)
        title_pattern = '<h2.*>.*?</h2>'  # article title block
        result_title = re.compile(title_pattern, re.S).findall(data_content)
        res_title = result_title[0].replace("<h2 class=\"rich_media_title\" id=\"activity-name\">", "").replace("</h2>", "").strip()
        content_pattern = 'id="js_content">(.*?)<div class="rich_media_tool" id="js_sg_bar">'  # article body
        content = re.compile(content_pattern, re.S).findall(data_content)
        try:
            fh.write(res_title.encode("utf-8"))
            for i in content:
                fh.write(i.strip().encode("utf-8"))
        except UnicodeEncodeError as e:
            continue
    fh.write("</body></html>".encode("utf-8"))
    fh.close()

if __name__ == '__main__':
    pagestart = 1
    pageend = 2
    key = "人工智能"
    get_url(key, pagestart, pageend)
    get_url_content(list_url)
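As written, use_proxy actually sends requests directly; the ProxyHandler lines are left commented out. If Sogou starts throttling your IP, those lines can be re-enabled along the following lines. This is only a minimal sketch of my own, assuming you have a working HTTP proxy; proxy_addr is a placeholder "ip:port" string, not something supplied by this article.

import urllib.request

def use_proxy_via(url, proxy_addr):
    # proxy_addr is a hypothetical "ip:port" HTTP proxy address
    proxy = urllib.request.ProxyHandler({'http': proxy_addr})
    opener = urllib.request.build_opener(proxy)
    opener.addheaders = [("User-Agent", "Mozilla/5.0")]
    urllib.request.install_opener(opener)
    # all subsequent urlopen calls now go through the proxy
    return urllib.request.urlopen(url, timeout=10).read().decode('utf-8')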
Multi-threaded version:
import urllib.request
import urllib.parse
import urllib.error
import re, time
import sys
import queue
import threading

headers = ("User-Agent",
           "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3107.4 Safari/537.36")
operner = urllib.request.build_opener()
operner.addheaders = [headers]
urllib.request.install_opener(operner)
urlque = queue.Queue()
list_url = []

# Fetch the page content of a URL (optionally through a proxy)
def use_proxy(url):
    try:
        # proxy = urllib.request.ProxyHandler({'http': proxy_addr})
        # operner = urllib.request.build_opener(proxy)
        # urllib.request.install_opener(operner)
        headers = ("User-Agent",
                   "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3107.4 Safari/537.36")
        operner = urllib.request.build_opener()
        operner.addheaders = [headers]
        urllib.request.install_opener(operner)
        data = urllib.request.urlopen(url).read().decode('utf-8')
        # print(data)
        return data
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        elif hasattr(e, "reason"):
            print(e.reason)
    except Exception as e:
        print("exception" + str(e))
        time.sleep(1)

# Producer thread: collect article URLs from the search results and push them onto the queue
class get_url(threading.Thread):
    def __init__(self, key, pagestart, pageend, urlque):
        threading.Thread.__init__(self)
        self.pagestart = pagestart
        self.pageend = pageend
        self.key = key
        self.urlque = urlque

    def run(self):
        try:
            keycode = urllib.parse.quote(self.key)
            for page in range(self.pagestart, self.pageend + 1):
                url = "http://weixin.sogou.com/weixin?type=2&query=%s&page=%s" % (keycode, page)
                data = use_proxy(url)
                print("fetched search page:", url)
                listurl_pattern = '<h3>.*?("http://.*?)</h3>'  # pick the article link out of each result block
                result = re.compile(listurl_pattern, re.S).findall(data)
                if len(result) == 0:
                    print("no usable url found")
                    sys.exit()
                for i in range(len(result)):
                    res = result[i].replace("amp;", "").split(" ")[0].replace("\"", "")
                    # list_url.append(res)   # append to a list
                    self.urlque.put(res)     # push onto the queue
                self.urlque.task_done()
            # return list_url
        except urllib.error.URLError as e:
            if hasattr(e, "code"):
                print(e.code)
            elif hasattr(e, "reason"):
                print(e.reason)
        except Exception as e:
            print("exception:", e)

# Consumer thread: pop URLs from the queue, fetch each article, and write title/body to an HTML file
class get_url_content(threading.Thread):
    def __init__(self, urlque):
        threading.Thread.__init__(self)
        self.urlque = urlque

    def run(self):
        fh1 = open("D:\\python-script\\1.html", 'wb')
        html1 = '''<!DOCTYPE html>\n<html xmlns="http://www.w3.org/1999/xhtml">\n<head>\n<meta http-equiv="Content-Type" content="text/html; charset=utf-8">\n<title>微信文章</title></head>\n<body>'''
        fh1.write(html1.encode("utf-8"))
        fh1.close()
        fh = open("D:\\python-script\\1.html", 'ab')
        while True:
            try:
                url = self.urlque.get()
                data_content = use_proxy(url)
                title_pattern = '<h2.*>.*?</h2>'  # article title block
                result_title = re.compile(title_pattern, re.S).findall(data_content)
                res_title = result_title[0].replace("<h2 class=\"rich_media_title\" id=\"activity-name\">", "").replace("</h2>", "").strip()
                content_pattern = 'id="js_content">(.*?)<div class="rich_media_tool" id="js_sg_bar">'  # article body
                content = re.compile(content_pattern, re.S).findall(data_content)
                # c = '<p style="max-width: 100%;box-sizing: border-box;min-height: 1em;text-indent: 2em;word-wrap: break-word !important;">'
                # for i in content:
                #     c_content = i.replace(c, "").replace("<br /></p>", "").replace("</p>", "")
                fh.write(res_title.encode("utf-8"))
                for i in content:
                    fh.write(i.strip().encode("utf-8"))
            except UnicodeEncodeError as e:
                continue
        fh.close()

# Control thread: poll the queue and exit once it has been drained
class contrl(threading.Thread):
    def __init__(self, urlqueue):
        threading.Thread.__init__(self)
        self.urlqueue = urlqueue

    def run(self):
        while True:
            print("program running")
            if self.urlqueue.empty():
                time.sleep(3)
                print("program finished")
                exit()

if __name__ == '__main__':
    pagestart = 1
    pageend = 2
    key = "人工智能"
    get_url = get_url(key, pagestart, pageend, urlque)
    get_url.start()
    get_content = get_url_content(urlque)
    get_content.start()
    cntrol = contrl(urlque)
    cntrol.start()
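The multi-threaded version is a simple producer/consumer pipeline: get_url pushes article URLs onto a queue.Queue, get_url_content pops them off, and contrl polls the queue to decide when the program is done. Note that here task_done() is called by the producer and the consumer loops forever, relying on contrl to stop the process. The standard queue idiom, shown below as a sketch of my own rather than the article's code, has the consumer acknowledge each item and the main thread wait on join():

import queue
import threading

q = queue.Queue()

def consumer():
    while True:
        item = q.get()        # blocks until an item is available
        if item is None:      # sentinel value: producer is finished
            q.task_done()
            break
        print("processing", item)
        q.task_done()         # acknowledge the item so join() can return

t = threading.Thread(target=consumer)
t.start()
for u in ["http://example.com/a", "http://example.com/b"]:
    q.put(u)
q.put(None)   # tell the consumer to stop
q.join()      # wait until every queued item has been acknowledged
t.join()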
Hopefully this article is of some help to readers working on Python programming.
Tags: Python, WeChat crawler