一、分析爬取網頁:
1、網址
https://travel.qunar.com/
2、 打開網站,找到要爬取的網頁
https://travel.qunar.com/p-cs299979-chongqing
進來之后,找到評論界面,如下所示:在這里我選擇驢友點評數據爬取
點擊【驢友點評】,進入最終爬取的網址:https://travel.qunar.com/p-cs299979-chongqing-gaikuang-2-1-1#lydp
3、 進入開發者模式(F12),分析網頁,找到數據接口
(1)點擊網絡
(2)點擊左邊網頁中的第二、第三、第四…頁,觀察右邊的變化:發現右邊有一地址帶有頁數和每頁數據量。點擊該地址在新標簽頁中打開,發現返回的是json格式的數據,且數據正好對應該頁的內容,如下所示:
接口地址:https://travel.qunar.com/place/api/html/comments/dist/299979?sortField=1&pageSize=10&page=1
并且只要變換接口地址后面的page就可以獲取不同頁數的數據。同理,我們發現【熱門攻略】也是如此,因此將其順帶也爬取出來,數據接口地址:https://travel.qunar.com/place/api/html/books/dist/299979?sortField=0&pageSize=10&page=2
三、請求網頁數據并將數據進行保存
當我們確定了真實數據的URL后,這里便可以用requests的get或post方法進行請求網頁數據。關于requests庫的更多使用方式,大家可以前往(https://requests.readthedocs.io/zh_CN/latest/ 或 https://www.cnblogs.com/aaronthon/p/9332757.html)查看。
1、分析爬取的json數據,為提取數據做準備,如下圖所示:json數據里提取的data數據是我們所需要的數據,而data數據就是html形式的字符串,打開網頁與其對比,發現最后需要獲取的數據在li標簽里面,因此我們選擇對其進行提取:采用正則與Beautiful Soup、xpath來解析數據
2、正則re提取數據,完整代碼如下:
# coding:utf-8
import requests,time,os,openpyxl,re
from openpyxl import Workbook
# NOTE(review): removed `import mysql.connector` — nothing in this script
# touches MySQL; the unused third-party import only broke portability.


class RenrenLogin(object):
    """Scrape Qunar's Chongqing travel data (hot guides + traveller reviews)
    from the JSON API with regexes and append the rows to Excel workbooks."""

    def __init__(self):
        # Output workbook paths: one file per data set.
        self.excellj = ''
        self.excellj1 = r"C:\XXXXXXXXXXXX\qne1.xlsx"  # 熱門攻略 (hot guides)
        self.excellj2 = r"C:\XXXXXXXXXXXX\qne2.xlsx"  # 驢友點評 (reviews)

    def get_html(self, url, names):
        """Fetch one API page (the endpoint returns JSON) and parse it."""
        data1 = requests.get(url=url).json()
        self.parse_html(data1, names)

    def parse_html(self, data, names):
        """Dispatch the HTML fragment in data["data"] to the right parser.

        Returns True when the page has been processed.
        """
        if names == "熱門攻略":
            self._parse_books(data)
        else:
            self._parse_comments(data)
        return True

    def _parse_books(self, data):
        """Extract the 熱門攻略 (hot guides) fields and save one row per post."""
        html = data["data"]
        # Author profile URL, avatar and nickname.
        userurl = re.findall(r'<a rel="nofollow" class="face" target="_blank" href="(.*?)".*?</a>', html, re.S)
        userpicture = re.findall(r'<img class="imgf" width="50" height="50" src="(.*?)"', html, re.S)
        usertitle = re.findall(r'<img class="imgf".*?title="(.*?)"', html, re.S)
        # Post title, reduced to its CJK characters only.
        titles = re.findall(r'<h3 class="tit"><a data-beacon="travelbook" target="_blank" href=".*?">(.*?)</h3>', html, re.S)
        usersubject = [''.join(re.findall('[\u4e00-\u9fa5]', t)) for t in titles]
        # Link to the post's detail page.
        userinfourl = re.findall(r'<a data-beacon="travelbook" target="_blank" href="(.*?)"', html, re.S)
        # Destination: keep only the highlighted (orange) part.
        places = re.findall(r'<p class="places">(.*?)<span class="colOrange">(.*?)</span></p><p class="places">', html, re.S)
        useraddress = [p[1] for p in places]
        # Itinerary, reduced to CJK characters, colon and spaces.
        strokes = re.findall(r'<p class="places">途經:(.*?)</p><ul class="pics">', html, re.S)
        userstroke = [''.join(re.findall('[\u4e00-\u9fa5: ]', s)) for s in strokes]
        # Image URLs attached to each post.
        pics = re.findall(r'<ul class="pics">(.*?)</li></ul>', html, re.S)
        userimages = [re.findall(r'src="(.*?)"', p, re.S) for p in pics]
        # NOTE(review): the original used the *same* pattern for likes and
        # comment counts, so both columns carry identical values — confirm
        # the real markup distinguishes them before trusting either column.
        userdz = re.findall(r'<i class="iconfont"></i><span>(.*?)</span>', html, re.S)
        userpl = re.findall(r'<i class="iconfont"></i><span>(.*?)</span>', html, re.S)
        for i in range(len(usertitle)):
            alldata = [usertitle[i], usersubject[i], useraddress[i],
                       userstroke[i], userdz[i], userpl[i], userpicture[i],
                       userurl[i], userinfourl[i], str(userimages[i])]
            self.parse_excel(alldata, "熱門攻略")

    def _parse_comments(self, data):
        """Extract the 驢友點評 (traveller reviews) fields and save one row each."""
        html = data["data"]
        usertitle = re.findall(r'<div class="e_comment_usr_name"><a rel="nofollow" href=".*?" target="_blank">(.*?)</a></div>', html, re.S)
        userurl = re.findall(r'<div class="e_comment_usr_name"><a rel="nofollow" href="(.*?)" target="_blank">.*?</a></div>', html, re.S)
        usercomtit = re.findall(r'<a data-beacon="comment_title" href=".*?" target="_blank">(.*?)</a><span class="icon_gold_camel">', html, re.S)
        # Review body, reduced to CJK characters and spaces.
        raw = re.findall(r'<div class="e_comment_content">(.*?)閱讀全部</a></div>', html, re.S)
        usercomment = [''.join(re.findall('[\u4e00-\u9fa5 ]', r)) for r in raw]
        # Photo albums; pad with '空' to one entry per review (pages hold 10).
        albums = re.findall(r'<ul class="base_fl" ><li><a rel="nofollow" data-beacon="comment_pic" href=".*?" target="_blank">共.*?張》', html, re.S)
        userimages = [re.findall(r'src="(.*?)"', a, re.S) for a in albums]
        while len(userimages) < 10:
            userimages.append('空')
        userpicture = re.findall(r'<div class="e_comment_usr_pic"><a rel="nofollow" href=".*?" target="_blank"><img .*? src="(.*?)" /></a></div>', html, re.S)
        for i in range(len(usertitle)):
            alldata = [usertitle[i], usercomtit[i], usercomment[i],
                       userurl[i], str(userimages[i]), userpicture[i]]
            self.parse_excel(alldata, "驢友點評")

    def parse_excel(self, alldata, names):
        """Append one data row to the workbook for *names*, creating the
        workbook with a header row on first use. Returns True."""
        if names == "熱門攻略":
            self.excellj = self.excellj1
            filetitle = ["用戶昵稱","用戶發表主題","用戶途徑","用戶路徑","用戶點贊數","用戶評論數","用戶頭像","用戶主頁地址","用戶詳情地址","用戶發布圖片"]
        else:
            self.excellj = self.excellj2
            filetitle = ["用戶昵稱","用戶發表主題","用戶評論","用戶主頁地址","用戶發布圖片","用戶頭像"]
        if not os.path.exists(self.excellj):
            # New file: build it in memory directly (the original saved an
            # empty workbook and immediately reloaded it — pointless I/O).
            wb = Workbook()
            wa = wb.active
            wa.append(filetitle)
        else:
            wb = openpyxl.load_workbook(self.excellj)
            wa = wb.active
        wa.append(alldata)
        wb.save(self.excellj)
        return True

    def main(self):
        """Crawl the first two pages of each endpoint."""
        UrlList = [
            "https://travel.qunar.com/place/api/html/books/dist/299979?sortField=0&pageSize=10&page=",
            "https://travel.qunar.com/place/api/html/comments/dist/299979?sortField=1&pageSize=10&page=",
        ]
        names = ["熱門攻略", "驢友點評"]
        for i in range(len(UrlList)):
            for j in range(1, 3):
                url = UrlList[i] + str(j)
                self.get_html(url, names[i])
                print(f"重慶地區【{names[i]}】第{j}頁數據爬取結束!!!")
                time.sleep(10)  # throttle: be polite to the server


if __name__ == '__main__':
    spider = RenrenLogin()
    spider.main()
結果如下所示:
【熱門攻略】:
【驢友點評】:
3、BeautifulSoup提取數據,完整代碼如下:這里只爬取了驢友點評,熱門攻略也是一樣方法
# coding:utf-8
import requests,time,os,openpyxl,re
from openpyxl import Workbook
from bs4 import BeautifulSoup


class RenrenLogin(object):
    """Scrape 驢友點評 (traveller reviews) from Qunar's JSON API using
    BeautifulSoup and append the rows to an Excel workbook."""

    def __init__(self):
        # Output workbook path.
        self.excellj = r"C:\XXXXXXXXXXXX\qne1.xlsx"

    def get_html(self, url):
        """Fetch one API page (JSON) and parse it."""
        data1 = requests.get(url=url).json()
        self.parse_html(data1)

    def parse_html(self, data):
        """Pull the review fields out of the HTML fragment in data["data"].

        Returns True when every review on the page has been saved.
        """
        soup = BeautifulSoup(data["data"], 'lxml')

        # Reviewer nickname and profile URL.
        usertitle, userurl = [], []
        for node in soup.find_all('div', class_="e_comment_usr_name"):
            a_tag = node.find('a')
            userurl.append(a_tag.get('href'))
            usertitle.append(a_tag.text)

        # Review title.
        usercomtit = [node.find('a').text
                      for node in soup.find_all('div', class_="e_comment_title")]

        # Review body: every <p>, space-separated.
        usercomment = []
        for node in soup.find_all('div', class_="e_comment_content"):
            usercomment.append(''.join(p.text + ' ' for p in node.find_all('p')))

        # Attached photos: ' , '-separated src list per review.
        userimages = []
        for node in soup.find_all('div', class_="e_comment_imgs clrfix"):
            userimages.append(''.join(img.get("src") + ' , '
                                      for img in node.find_all('img')))
        # BUG FIX: the original padded with `range(10 - len(L4))` — the length
        # of the *comment* list — so the image list was mis-padded whenever the
        # two lists differed. Pad by the image list's own length instead.
        while len(userimages) < 10:
            userimages.append('空')

        # NOTE(review): this stores the <a> href of the avatar block, not the
        # <img> src, although the column is labelled 用戶頭像 — kept as-is.
        userpicture = [node.find('a').get('href')
                       for node in soup.find_all('div', class_="e_comment_usr_pic")]

        for i in range(len(usertitle)):
            alldata = [usertitle[i], usercomtit[i], usercomment[i],
                       userurl[i], str(userimages[i]), userpicture[i]]
            self.parse_excel(alldata)
        return True

    def parse_excel(self, alldata):
        """Append one row to the workbook, creating it (with a header row)
        on first use. Returns True."""
        filetitle = ["用戶昵稱","用戶發表主題","用戶評論","用戶主頁地址","用戶發布圖片","用戶頭像"]
        if not os.path.exists(self.excellj):
            # New file: build it in memory directly (the original saved an
            # empty workbook and immediately reloaded it — pointless I/O).
            wb = Workbook()
            wa = wb.active
            wa.append(filetitle)
        else:
            wb = openpyxl.load_workbook(self.excellj)
            wa = wb.active
        wa.append(alldata)
        wb.save(self.excellj)
        return True

    def main(self):
        """Crawl the first two pages of the reviews endpoint."""
        UrlList = ["https://travel.qunar.com/place/api/html/comments/dist/299979?sortField=1&pageSize=10&page="]
        names = ["驢友點評"]
        for i in range(len(UrlList)):
            for j in range(1, 3):
                url = UrlList[i] + str(j)
                self.get_html(url)
                print(f"重慶地區【{names[i]}】第{j}頁數據爬取結束!!!")
                time.sleep(10)  # throttle: be polite to the server


if __name__ == '__main__':
    spider = RenrenLogin()
    spider.main()
【驢友點評】:
4、Xpath提取數據,完整代碼如下:這里只爬取了熱門攻略,驢友點評也是一樣方法
# coding:utf-8
import requests,time,os,openpyxl,re
from openpyxl import Workbook
from lxml import etree


class RenrenLogin(object):
    """Scrape 熱門攻略 (hot guides) from Qunar's JSON API using lxml/XPath
    and append the rows to an Excel workbook."""

    def __init__(self):
        # Output workbook path.
        self.excellj = r"C:\XXXXXXXXXX\qne1.xlsx"

    def get_html(self, url):
        """Fetch one API page (JSON) and parse it."""
        data1 = requests.get(url=url).json()
        self.parse_html(data1)

    def parse_html(self, data):
        """Extract the guide fields from the HTML fragment in data["data"].

        Returns True when every entry on the page has been saved.

        BUG FIX: the original iterated `range(10)` in four places and raised
        IndexError whenever a page carried fewer than 10 entries; every loop
        now iterates over the nodes actually found.
        """
        html = etree.HTML(data["data"])

        # Author nickname, profile URL and avatar.
        usertitle = html.xpath('//span[@class="user_name"]/a/text()')
        userurl = html.xpath('//span[@class="user_name"]/a/@href')
        userpicture = html.xpath('//img[@class="imgf"]/@src')

        # Post title: a <h3> may hold several text nodes; join them.
        usersubject = [''.join(h3.xpath('./a/text()'))
                       for h3 in html.xpath('//h3[@class="tit"]')]

        # Destination = first <p> text + the highlighted (orange) span.
        items = html.xpath('//li[@class="list_item"]')
        oranges = html.xpath('//span[@class="colOrange"]')
        useraddress = [item.xpath('p/text()')[0] + orange.xpath('./text()')[0]
                       for item, orange in zip(items, oranges)]

        # Itinerary (fourth <p> of each list item).
        userstroke = [item.xpath('p[4]/text()') for item in items]

        # Image URLs, one list per post (the original shadowed `userimages`
        # as both the loop variable and the result — renamed for clarity).
        userimages = []
        for pics in html.xpath('//ul[@class="pics"]'):
            userimages.append([pics.xpath(f'li[{j}]/a/img/@src')
                               for j in range(1, len(pics) + 1)])

        # NOTE(review): icon_view is labelled 點贊 (likes) and icon_love
        # 評論 (comments) in the original — the mapping looks swapped;
        # verify against the live markup.
        userdz = html.xpath('//span[@class="icon_view"]/span/text()')
        userpl = html.xpath('//span[@class="icon_love"]/span/text()')

        for i in range(len(usertitle)):
            alldata = [usertitle[i], usersubject[i], useraddress[i],
                       str(userstroke[i]), userdz[i], userpl[i],
                       userpicture[i], userurl[i], str(userimages[i])]
            self.parse_excel(alldata)
        return True

    def parse_excel(self, alldata):
        """Append one row to the workbook, creating it (with a header row)
        on first use. Returns True."""
        filetitle = ["用戶昵稱","用戶發表主題","用戶途徑","用戶路徑","用戶點贊數","用戶評論數","用戶頭像","用戶主頁地址","用戶發布圖片"]
        if not os.path.exists(self.excellj):
            # New file: build it in memory directly (the original saved an
            # empty workbook and immediately reloaded it — pointless I/O).
            wb = Workbook()
            wa = wb.active
            wa.append(filetitle)
        else:
            wb = openpyxl.load_workbook(self.excellj)
            wa = wb.active
        wa.append(alldata)
        wb.save(self.excellj)
        return True

    def main(self):
        """Crawl the first two pages of the hot-guides endpoint."""
        UrlList = ["https://travel.qunar.com/place/api/html/books/dist/299979?sortField=0&pageSize=10&page="]
        names = ["熱門攻略"]
        for i in range(len(UrlList)):
            for j in range(1, 3):
                url = UrlList[i] + str(j)
                self.get_html(url)
                print(f"重慶地區【{names[i]}】第{j}頁數據爬取結束!!!")
                time.sleep(10)  # throttle: be polite to the server


if __name__ == '__main__':
    spider = RenrenLogin()
    spider.main()
結果如下: