An entry-level crawler: it only grabs book names, descriptions, and download links, and stores them in a database.
Database utility class: DBUtil.py
import pymysql


class DBUtils(object):
    def connDB(self):  # connect to the database
        conn = pymysql.connect(host='192.168.251.114', port=3306,
                               user='root', passwd='b6f3g2',
                               db='yangsj', charset='utf8')
        cur = conn.cursor()
        return (conn, cur)

    def exeUpdate(self, conn, cur, sql):  # update or insert
        sta = cur.execute(sql)
        conn.commit()
        return sta

    def exeDelete(self, conn, cur, IDs):  # delete operation (demo, unused)
        sta = 0
        for eachID in IDs.split(' '):
            sta += cur.execute("delete from students where Id=%d" % (int(eachID)))
        conn.commit()
        return sta

    def exeQuery(self, cur, sql):  # run a query
        effect_row = cur.execute(sql)
        return (effect_row, cur)

    def connClose(self, conn, cur):  # close the connection, release resources
        cur.close()
        conn.close()


if __name__ == '__main__':
    dbUtil = DBUtils()
    conn, cur = dbUtil.connDB()
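The insert statements built later with % string formatting break on titles containing quotes (the crawler works around this with .replace("'","")). A minimal sketch of the safer alternative, assuming the same connection settings: pymysql also accepts parameterized queries, where the driver fills in %s placeholders itself, so quotes in the data need no manual escaping.

# Sketch only, not part of the original DBUtils; the values are placeholders.
import pymysql

conn = pymysql.connect(host='192.168.251.114', port=3306, user='root',
                       passwd='b6f3g2', db='yangsj', charset='utf8')
cur = conn.cursor()
cur.execute("insert into book (bookName,bookUrl,bookInfo) values (%s,%s,%s)",
            ("it's a demo title", "https://example.com/demo.html", "demo info"))
conn.commit()
cur.close()
conn.close()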
Book operations file: bookOpe.py
from DBUtil import DBUtils
from bookInfo import Book
from bookInfo import DownLoadInfo
import logging

logging.basicConfig(level=logging.INFO)


class BookOperator(object):
    def __addBook(self, book):
        logging.info("add book:%s" % book.bookName)
        dbUtil = DBUtils()
        conn, cur = dbUtil.connDB()
        insertBookSql = ("insert into book (bookName,bookUrl,bookInfo) values ('%s','%s','%s');"
                         % (book.bookName, book.downLoadUrl, book.mainInfo))
        dbUtil.exeUpdate(conn, cur, insertBookSql)
        dbUtil.connClose(conn, cur)

    def __selectLastBookId(self):
        logging.info("selectLastBookId")
        dbUtil = DBUtils()
        conn, cur = dbUtil.connDB()
        selectLastBookSql = "select id from book order by id desc limit 1"
        effect_row, cur = dbUtil.exeQuery(cur, selectLastBookSql)
        bookId = cur.fetchone()[0]
        dbUtil.connClose(conn, cur)
        return bookId

    def __addBookDownLoadInfos(self, downLoadInfos, bookId):
        logging.info("add bookId:%s" % bookId)
        dbUtil = DBUtils()
        conn, cur = dbUtil.connDB()
        for downLoadinfo in downLoadInfos:
            insertBookDownLoadInfo = ("insert into book_down_url (bookId,downName,downUrl) values ('%s','%s','%s');"
                                      % (bookId, downLoadinfo.downName, downLoadinfo.downUrl))
            dbUtil.exeUpdate(conn, cur, insertBookDownLoadInfo)
        dbUtil.connClose(conn, cur)

    def addBookInfo(self, book):
        logging.info("add bookInfo:%s" % book.bookName)
        self.__addBook(book)
        bookId = self.__selectLastBookId()
        self.__addBookDownLoadInfos(book.downLoadInfos, bookId)


if __name__ == '__main__':
    bookope = BookOperator()
    book = Book("aaa", "yang", "cccc")
    book.addDownLoadUrl(DownLoadInfo("aaa.html", "book"))
    bookope.addBookInfo(book)
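Note that __selectLastBookId re-queries the table for the newest id, so if two inserts ever interleave it can pick up the wrong row. A hedged alternative sketch (addBookReturningId is a hypothetical helper, not in the original class): pymysql's cursor already exposes the AUTO_INCREMENT value generated by the last insert on that connection.

# Hypothetical variant: return the new book id straight from the insert
# via cursor.lastrowid, avoiding the second SELECT entirely.
def addBookReturningId(dbUtil, book):
    conn, cur = dbUtil.connDB()
    cur.execute("insert into book (bookName,bookUrl,bookInfo) values (%s,%s,%s)",
                (book.bookName, book.downLoadUrl, book.mainInfo))
    conn.commit()
    bookId = cur.lastrowid  # id generated by this connection's insert
    dbUtil.connClose(conn, cur)
    return bookId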
Book info file: bookInfo.py
class Book(object):
    # book information
    def __init__(self, mainInfo, downLoadUrl, bookName):
        self.mainInfo = mainInfo
        self.downLoadUrl = downLoadUrl
        self.bookName = bookName
        self.downLoadInfos = []

    def addDownLoadUrl(self, downloadInfo):
        self.downLoadInfos.append(downloadInfo)

    def print_book_info(self):
        print("bookName :%s" % (self.bookName))


class DownLoadInfo(object):
    # download information
    def __init__(self, downUrl, downName):
        self.downUrl = downUrl
        self.downName = downName

    def print_down_info(self):
        print("downLoad %s - %s" % (self.downUrl, self.downName))
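A quick usage sketch showing how the two classes fit together (the values are placeholders); it mirrors the demo in bookOpe.py's __main__ block.

book = Book("sample description", "list152_1.html", "Sample Book")
book.addDownLoadUrl(DownLoadInfo("https://example.com/a.html", "mirror 1"))
book.print_book_info()        # bookName :Sample Book
for d in book.downLoadInfos:
    d.print_down_info()       # downLoad https://example.com/a.html - mirror 1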
Page-parsing file: FiveOneJobFetch.py (despite the "51job" in its name, it parses the jb51.net book pages)
import requests
from bs4 import BeautifulSoup
from bookInfo import Book
from bookInfo import DownLoadInfo
import logging


class PageFetch(object):
    # site root; the original "//www.jb51.net/" has no scheme, which requests rejects
    host = "https://www.jb51.net/"
    # category path appended to the root
    category = "books/"

    def __init__(self, pageUrl):
        # the specific list page requested
        self.pageUrl = pageUrl
        # full URL
        self.url = PageFetch.host + PageFetch.category + pageUrl

    def __getPageContent(self):
        req = requests.get(self.url)
        if req.status_code == 200:
            req.encoding = "gb2312"
            return req.text
        else:
            return ""

    @staticmethod
    def getPageContent(url):
        req = requests.get(url)
        if req.status_code == 200:
            req.encoding = "gb2312"
            return req.text
        else:
            return ""

    def __getMaxPageNumAndUrl(self):
        # walk the pager; paging URLs look like list45_2.html, where 2 is the page number
        fetchUrl = self.pageUrl
        maxPageNum = 0
        maxLink = ""
        while maxLink == "":
            url = PageFetch.host + PageFetch.category + fetchUrl
            reqContent = PageFetch.getPageContent(url)
            soup = BeautifulSoup(reqContent, "html.parser")
            for ul in soup.select(".plist"):
                maxPageNum = ul.select("strong")[0].text
                alink = ul.select("a")
                if alink[-1]['href'] == "#":
                    maxLink = alink[1]['href']
                else:
                    fetchUrl = alink[-1]['href']
        return maxPageNum, maxLink

    def __formatPage(self, pageNum):
        # build a paging URL of the form list45_2.html
        lineBeginSite = self.pageUrl.index("_") + 1
        docBeginSite = self.pageUrl.index(".")
        return self.pageUrl[:lineBeginSite] + str(pageNum + 1) + self.pageUrl[docBeginSite:]

    def getBookPageList(self):
        # collect the URL of every list page in this category
        shortPageList = []
        maxPageNum, urlPattern = self.__getMaxPageNumAndUrl()
        for i in range(int(maxPageNum)):
            shortPageList.append(self.host + self.category + self.__formatPage(i))
        return shortPageList

    @staticmethod
    def getDownloadPage(url):
        downPage = []
        reqContent = PageFetch.getPageContent(url)
        soup = BeautifulSoup(reqContent, "html.parser")
        for a in soup.select(".cur-cat-list .btn-dl"):
            downPage.append(PageFetch.host + a['href'])
        return downPage

    @staticmethod
    def getBookInfo(url):
        logging.info("fetching book info url:%s" % url)
        reqContent = PageFetch.getPageContent(url)
        soup = BeautifulSoup(reqContent, "html.parser")
        # strip the "截图:" (screenshot) label and single quotes from the page text
        mainInfo = (soup.select("#soft-intro"))[0].text.replace("截图:", "").replace("'", "")
        title = (soup.select("dl dt h1"))[0].text.replace("'", "")
        book = Book(mainInfo, url, title)
        for ul in soup.select(".ul_Address"):
            for li in ul.select("li"):
                downLoadInfo = DownLoadInfo(li.select("a")[0]['href'], li.select("a")[0].text)
                book.addDownLoadUrl(downLoadInfo)
        return book


if __name__ == '__main__':
    p = PageFetch("list152_1.html")
    shortPageList = p.getBookPageList()
    downPage = []
    for page in shortPageList:
        downLoadPage = PageFetch.getDownloadPage(page)
        downPage = downPage + downLoadPage
    print("================ Summary ===============================")
    for bookDownLoadPage in downPage:
        book = PageFetch.getBookInfo(bookDownLoadPage)
        print(book.bookName + ":%s" % book.downLoadUrl)
        for d in book.downLoadInfos:
            print("%s - %s" % (d.downUrl, d.downName))
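The paging logic is easiest to see in isolation. A standalone sketch of the string surgery __formatPage performs on the list URL pattern (format_page is a hypothetical free-function copy of it): keep everything up to and including the "_", swap in the 1-based page number, and keep the ".html" suffix.

def format_page(page_url, page_num):
    underscore = page_url.index("_") + 1   # end of the "list152_" prefix
    dot = page_url.index(".")              # start of the ".html" suffix
    return page_url[:underscore] + str(page_num + 1) + page_url[dot:]

print(format_page("list152_1.html", 0))  # list152_1.html
print(format_page("list152_1.html", 4))  # list152_5.html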
Entry script: 51Job.py. Copy the files above into the same folder and run this file.
from FiveOneJobFetch import PageFetch
from bookInfo import Book
from bookInfo import DownLoadInfo
from bookOpe import BookOperator


def main(url):
    p = PageFetch(url)
    shortPageList = p.getBookPageList()
    bookOperator = BookOperator()
    downPage = []
    for page in shortPageList:
        downLoadPage = PageFetch.getDownloadPage(page)
        downPage = downPage + downLoadPage
    for bookDownLoadPage in downPage:
        book = PageFetch.getBookInfo(bookDownLoadPage)
        bookOperator.addBookInfo(book)
    print("Data fetched successfully: " + url)


if __name__ == '__main__':
    urls = ["list152_35.html", "list300_2.html", "list476_6.html", "list977_2.html",
            "list572_5.html", "list509_2.html", "list481_1.html", "list576_1.html",
            "list482_1.html", "list483_1.html", "list484_1.html"]
    for url in urls:
        main(url)
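As written, one unhandled network error aborts the whole batch. A hedged variant of the driver loop (sketch only, reusing the same urls list and main function): log per-category failures and keep going, with a small pause between categories.

import logging
import time

for url in urls:
    try:
        main(url)
    except Exception:
        # record the failure and move on to the next category page
        logging.exception("fetch failed for %s, continuing", url)
    time.sleep(1)  # brief delay to be polite to the server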
Database tables: the book info table and the download URL table
CREATE TABLE `book` (
    `id` INT(11) NOT NULL AUTO_INCREMENT,
    `bookName` VARCHAR(200) NULL DEFAULT NULL,
    `bookUrl` VARCHAR(500) NULL DEFAULT NULL,
    `bookInfo` TEXT NULL,
    PRIMARY KEY (`id`)
) COLLATE='utf8mb4_general_ci' ENGINE=InnoDB AUTO_INCREMENT=2936;

CREATE TABLE `book_down_url` (
    `id` INT(11) NOT NULL AUTO_INCREMENT,
    `bookId` INT(11) NOT NULL DEFAULT '0',
    `downName` VARCHAR(200) NOT NULL DEFAULT '0',
    `downUrl` VARCHAR(2000) NOT NULL DEFAULT '0',
    PRIMARY KEY (`id`)
) COLLATE='utf8mb4_general_ci' ENGINE=InnoDB AUTO_INCREMENT=44441;
Git repository: https://git.oschina.net/yangsj/BookFetch/tree/master
Tags: python crawler