Copy the code; the code is as follows:
#!/usr/bin/python
# -*- coding: utf-8 -*-
# JCrawler
# Author: Jam <810441377@qq.com>

import time
import urllib2
from bs4 import BeautifulSoup

# Target site
TargetHost = "http://adirectory.blog.com"

# User Agent
UserAgent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117 Safari/537.36'

# Link scraping rules
# Category link scraping rules
CategoryFind = [{'findMode': 'find',    'findTag': 'div', 'rule': {'id': 'cat-nav'}},
                {'findMode': 'findAll', 'findTag': 'a',   'rule': {}}]

# Article link scraping rules
ArticleListFind = [{'findMode': 'find',    'findTag': 'div', 'rule': {'id': 'content'}},
                   {'findMode': 'findAll', 'findTag': 'h2',  'rule': {'class': 'title'}},
                   {'findMode': 'findAll', 'findTag': 'a',   'rule': {}}]

# Pagination URL rule
PageUrl = 'page/#page/'
PageStart = 1
PageStep = 1
PageStopHtml = '404: Page Not Found'
# Fetch the raw HTML of a URL with the configured User-Agent.
def GetHtmlText(url):
    request = urllib2.Request(url)
    request.add_header('Accept', "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp")
    request.add_header('Accept-Encoding', "*")
    request.add_header('User-Agent', UserAgent)
    return urllib2.urlopen(request).read()

# Concatenate a tag list (or a tag's children) into one HTML string.
def ArrToStr(varArr):
    returnStr = ""
    for s in varArr:
        returnStr += str(s)
    return returnStr

# Apply the find/findAll rules in order, re-parsing the intermediate HTML after each step.
def GetHtmlFind(htmltext, findRule):
    findReturn = BeautifulSoup(htmltext)
    returnText = ""
    for f in findRule:
        if returnText != "":
            findReturn = BeautifulSoup(returnText)
        if f['findMode'] == 'find':
            findReturn = findReturn.find(f['findTag'], f['rule'])
        if f['findMode'] == 'findAll':
            findReturn = findReturn.findAll(f['findTag'], f['rule'])
        returnText = ArrToStr(findReturn)
    return findReturn
# Collect category names and URLs from the site navigation.
def GetCategory():
    categorys = []
    htmltext = GetHtmlText(TargetHost)
    findReturn = GetHtmlFind(htmltext, CategoryFind)
    for tag in findReturn:
        print "[G]->Category:" + tag.string + "|Url:" + tag['href']
        categorys.append({'name': tag.string, 'url': tag['href']})
    return categorys
# Walk a category's pages until the 404 stop marker appears, collecting article links.
def GetArticleList(categoryUrl):
    articles = []
    page = PageStart
    while True:
        htmltext = ""
        pageUrl = PageUrl.replace("#page", str(page))
        print "[G]->PageUrl:" + categoryUrl + pageUrl
        # Retry the request: stop on 404, wait and retry on 504, give up on anything else.
        while True:
            try:
                htmltext = GetHtmlText(categoryUrl + pageUrl)
                break
            except urllib2.HTTPError, e:
                print "[E]->HTTP Error:" + str(e.code)
                if e.code == 404:
                    htmltext = PageStopHtml
                    break
                if e.code == 504:
                    print "[E]->HTTP Error 504: Gateway Time-out, Wait"
                    time.sleep(5)
                else:
                    break
        if htmltext.find(PageStopHtml) >= 0:
            print "End Page."
            break
        else:
            findReturn = GetHtmlFind(htmltext, ArticleListFind)
            for tag in findReturn:
                if tag.string is not None and tag['href'].find(TargetHost) >= 0:
                    print "[G]->Article:" + tag.string + "|Url:" + tag['href']
                    articles.append({'name': tag.string, 'url': tag['href']})
            page += 1
    return articles
print "[G]->GetCategory"
Mycategorys = GetCategory();
print "[G]->GetCategory->Success."
time.sleep(3)
for category in Mycategorys:
print "[G]->GetArticleList:" + category['name']
GetArticleList(category['url'])
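The script above is Python 2 (urllib2 and print statements). As a minimal sketch, and assuming only the standard library plus BeautifulSoup 4 on Python 3, the fetch step and the CategoryFind rule chain reduce to the following; the get_html_text helper name is illustrative, not part of the original script:

#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Hypothetical Python 3 sketch of the fetch step plus the CategoryFind rule chain.
import urllib.request
from bs4 import BeautifulSoup

TargetHost = "http://adirectory.blog.com"
UserAgent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117 Safari/537.36'

def get_html_text(url):
    # urllib.request replaces urllib2 on Python 3; headers are passed to Request directly.
    request = urllib.request.Request(url, headers={'User-Agent': UserAgent})
    return urllib.request.urlopen(request).read()

html = get_html_text(TargetHost)
soup = BeautifulSoup(html, 'html.parser')
# CategoryFind expands to: find the div with id "cat-nav", then collect every <a> inside it.
nav = soup.find('div', {'id': 'cat-nav'})
if nav is not None:
    for tag in nav.find_all('a'):
        print("[G]->Category:" + (tag.string or '') + "|Url:" + tag.get('href', ''))

The same pattern covers ArticleListFind: find the content div, then find_all the h2 headings with class "title", then find_all the links inside them.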