
I've recently been working through Python3网络爬虫开发实践 (by 崔庆才) and just reached the part where he crawls WeChat public account articles through proxies. Writing the code exactly as in the book, however, ran into some problems, so I patched it up using material from the book's earlier chapters. (The author wrote this code over half a year ago, and Tencent's site has been updated since then.)

Here is the code:

from requests import Request, Session, PreparedRequest
import requests
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from bs4 import BeautifulSoup as bs
import pymysql

TIMEOUT = 20
 
# What to search for and crawl
keyword = '美女图片'
options = webdriver.ChromeOptions()
# Set the browser language to Chinese
options.add_argument('lang=zh_CN.UTF-8')
# Replace the User-Agent
options.add_argument(
 'user-agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"')
browser = webdriver.Chrome(chrome_options=options)
REDIS_HOST = '192.168.1.248'
REDIS_PORT = 6379
REDIS_PASSWORD = '*****'
REDIS_KEY = 'requests'
PROXY_POOL_URL = 'http://127.0.0.1:8080/random'
MAX_FAILED_TIME = 5
 
MYSQL_HOST = 'localhost'
MYSQL_PORT = 3306
MYSQL_USER = 'moxiao'
MYSQL_PASSWORD = '******'
 
 
class mysqlConn():
 def __init__(self, host=MYSQL_HOST, username=MYSQL_USER, password=MYSQL_PASSWORD, port=MYSQL_PORT):
  """
   Initialize the MySQL connection
  :param host:
  :param username:
  :param password:
  :param port:
  """
  try:
   self.db = pymysql.connect(host=host, user=username, password=password,
           database='weixin_data', port=port)
   self.cursor = self.db.cursor()
  except pymysql.MySQLError as e:
   print(e.args)
 
 def insert(self, table, data):
  keys = ', '.join(data.keys())
  values = ', '.join(['%s'] * len(data))
  sql = 'insert into %s (%s) values (%s)' % (table, keys, values)
  try:
   self.cursor.execute(sql, tuple(data.values()))
   self.db.commit()
  except pymysql.MySQLError as e:
   print(e.args)
   self.db.rollback()
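 # For example, insert('articles', {'title': 't', 'content': 'c'}) builds
 # "insert into articles (title, content) values (%s, %s)" and lets PyMySQL
 # escape the two values when executing the statement.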
 
 
class WeixinRequest(Request):
 def __init__(self, url, callback, method="GET", headers=None, need_proxy=False, fail_time=0, timeout=TIMEOUT):
  super(WeixinRequest, self).__init__(url=url, method=method, headers=headers)
  self.callback = callback
  self.need_proxy = need_proxy
  self.fail_time = fail_time
  self.timeout = timeout
 
 def prepare(self):
  p = PreparedRequest()
  p.prepare(
   method=self.method,
   url=self.url,
   headers=self.headers,
  )
  return p
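 # Note: Session.send() only accepts an already-prepared request and sends it
 # unchanged, so prepare() rebuilds a fresh PreparedRequest from this object's
 # method, url and headers every time the request is scheduled or retried.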
 
 
class WeixinResponse():
 
 def __init__(self, text):
  self.text = text
 
 def set_status_code(self, status_code):
  self.status_code = status_code
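 # This class duck-types the two attributes of requests.Response that the
 # scheduler actually reads (text and status_code), so pages rendered by the
 # Selenium browser can flow through the same pipeline as normal responses.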
 
 
import pickle
from redis import StrictRedis
 
 
class RedisQueue():
 def __init__(self):
  """
   Initialize the Redis connection
  """
  self.db = StrictRedis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD, db=3)
 
 def add(self, request):
  """
   Add a serialized Request to the queue
  :param request: request object
  :return: result of the push
  """
  if isinstance(request, WeixinRequest):
   return self.db.rpush(REDIS_KEY, pickle.dumps(request))
  return False
 
 def pop(self):
  """
   Pop the next request and deserialize it
  :return: Request or None
  """
  if self.db.llen(REDIS_KEY):
   return pickle.loads(self.db.lpop(REDIS_KEY))
  return None
 
 def empty(self):
  return self.db.llen(REDIS_KEY) == 0
 
 def del_all(self):
  return self.db.delete(REDIS_KEY)
 
 def get_proxy(self):
  """
   Fetch a proxy from the proxy pool
  :return:
  """
  try:
   response = requests.get(PROXY_POOL_URL)
   if response.status_code == 200:
    print('get Proxy', response.text)
    return response.text
  except requests.ConnectionError:
   return None
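 # The pool behind PROXY_POOL_URL is expected to return a bare "host:port"
 # string, e.g. "127.0.0.1:8888", which request() later turns into a proxies
 # dict for requests.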
 
 
from urllib.parse import urlencode
from requests import ReadTimeout, ConnectionError
from pyquery import PyQuery as pq
 
VALID_STATUSES = [200]
 
 
class Spider():
 base_url = 'http://weixin.sogou.com/weixin?'
 # Sogou WeChat search parameters: type=2 searches articles rather than accounts
 params = {'type': 2, 'query': keyword}
 headers = {
  'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
      '(KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
  # 'Cookie': '...',  # a logged-in Sogou cookie goes here when needed
 }
 session = Session()
 queue = RedisQueue()
 mysql = mysqlConn()

 def start(self):
  """
   Initialization
  :return:
  """
  # Update the headers globally
  # If you have ever tried changing headers this way, you will know it seems to
  # do nothing here; I wasted at least two hours on this
  self.session.headers.update(self.headers)
  start_url = self.base_url + urlencode(self.params)
  # I set need_proxy=False here, i.e. no proxy is used (just for testing)
  # This is where the headers actually take effect
  weixin_request = WeixinRequest(url=start_url, callback=self.parse_index, headers=self.headers, need_proxy=False)
  # Schedule the first request
  self.queue.add(weixin_request)
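 # Why the session.headers.update() above seems useless: schedule() delivers
 # requests via Session.send(), which transmits a PreparedRequest exactly as
 # WeixinRequest.prepare() built it. Session-level headers are only merged in
 # by Session.request()/prepare_request(), which this crawler never calls, so
 # headers must be attached to each WeixinRequest directly.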
 
 def schedule(self):
  """
   Schedule requests from the queue
  :return:
  """
  while not self.queue.empty():
   weixin_request = self.queue.pop()
   callback = weixin_request.callback
   print('Schedule', weixin_request.url)
   response = self.request(weixin_request)
    if response and response.status_code in VALID_STATUSES:
    results = list(callback(response))
    if results:
     for result in results:
      print('New Result', result)
      if isinstance(result, WeixinRequest):
        # Add the new article-detail URL to the queue as well
       self.queue.add(result)
      if isinstance(result, dict):
        # Save to MySQL
       self.mysql.insert('articles', result)
    else:
     self.error(weixin_request)
   else:
    self.error(weixin_request)
 
 def request(self, weixin_request):
  """
   执行请求
  :param weixin_request:请求
  :return: 响应
  """
  if not 'http://mp.weixin.qq.com/s"""
   解析索引页
  :param response: 响应
  :return: 新的响应
  """
  doc = pq(response.text)
  items = doc('.news-box .news-list li .txt-box h3 a').items()
  for item in items:
   url = item.attr('href')
   weixin_request = WeixinRequest(url=url, callback=self.parse_detail)
   yield weixin_request
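 # The CSS path above targets Sogou's result-list markup; if index parsing
 # suddenly yields nothing, this selector is the first thing to re-check.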
 
 def parse_detail(self, response):
  """
   Parse the detail page
  :param response: response
  :return: WeChat public account article
  """
  doc = pq(response.text)
  profile_inner = doc('.profile_inner')
  data = {
   'title': doc('.rich_media_title').text(),
   'content': doc('.rich_media_content').text(),
   'date': doc('#publish_time').text(),
   # 'nickname':doc('#js_profile_qrcode > div > strong').text(),
   'nickname': profile_inner.find('.profile_nickname').text(),
   'wechat':
    [ns for ns in profile_inner.find('.profile_meta').find('.profile_meta_value').items()][
     0].text()
  }
  # Collect the article's images
  print('#' * 30)
  soup = bs(response.text, 'lxml')
  wn = soup.find_all('img')
  for img in wn:
   if img.has_attr('_width') and img.has_attr('data-src'):
    print(img.attrs['data-src'])
  yield data
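 # The image loop above only prints the lazy-loaded URLs. Downloading one would
 # look roughly like this (hypothetical line, not part of the original script):
 # open('img.jpg', 'wb').write(requests.get(img.attrs['data-src']).content)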
 
 def error(self, weixin_request):
  """
   Error handling
  :param weixin_request: request
  :return:
  """
  weixin_request.fail_time = weixin_request.fail_time + 1
  print('Request Failed', weixin_request.fail_time, 'Times', weixin_request.url)
  if weixin_request.fail_time < MAX_FAILED_TIME:
   self.queue.add(weixin_request)
 
 def run(self):
  self.start()
  self.schedule()
 
 
if __name__ == '__main__':
 spider = Spider()
 spider.run()
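
To sanity-check the Redis queue on its own before kicking off the whole crawler, a quick round-trip test like the one below works. This is a minimal sketch that reuses the classes from the script above and assumes the Redis instance configured at the top is reachable:

queue = RedisQueue()
queue.del_all()
queue.add(WeixinRequest(url='http://example.com', callback=print))
restored = queue.pop()
# pickle preserves the url, callback and retry bookkeeping
print(restored.url, restored.fail_time, restored.timeout)
assert queue.empty()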

Update (2018-10-6):

In today's test the cookies could no longer log in to the site. Perhaps Tencent has rolled out new security checks; I can't tell exactly, but visiting the site in a regular browser still works fine.
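
If you want to keep experimenting, one direction worth trying is to log in manually in the Chrome window the script already opens, then copy that browser's cookies into the requests session before crawling. A minimal sketch (the helper name is my own, not part of the original code):

def sync_cookies(browser, session):
 # Copy every cookie from the Selenium-driven browser into the requests session
 for c in browser.get_cookies():
  session.cookies.set(c['name'], c['value'], domain=c.get('domain'))

sync_cookies(browser, Spider.session)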


That's all for this article. I hope it helps with your studies.

