很多时候,我们想要百度一个内容,却往往难以获得有用的信息,这时便需要进行必要的筛选。若是每次都点进链接中去查看,未免太过费时间。这里将利用 Python 爬虫,快速帮你下载百度搜索的各条数据。
# -*- coding: utf-8 -*- from selenium import webdriver from lxml import etree import os import time
save_path = 'D:\\桌面' # 文件路径 filename = "结果集"
# Entry point: read the search term and page count, build one Baidu
# result-page URL per page, fetch each page with Selenium, and hand the
# rendered HTML to spider() for parsing and saving.
if __name__ == "__main__":
    key_world = input("请输入您想要搜索的内容:")
    num = int(input("请输入您想要爬取的页数:"))
    filename = input("请输入您想要存储的文件名:")

    # Baidu paginates with &pn=<offset>, 10 results per page; the trailing
    # rsv_* parameters are copied from a real browser session.
    urls = [
        "https://www.baidu.com/s?wd=" + key_world
        + "&pn=" + str(page * 10)
        + "&oq=" + key_world
        + "&ie=utf-8&rsv_pq=da6a000c0001b80d&rsv_t=d7f5m3K7D2ij87xFqs1%2FBxTHWIIxqW6xVfayx6TdZRvpTSctGF1ObM1gLKs"
        for page in range(num)
    ]

    driver = webdriver.Firefox()
    try:
        for u in urls:
            driver.get(u)
            myPage = driver.page_source  # rendered HTML of the result page
            spider(myPage)
            time.sleep(2)  # be polite: pause between requests
    finally:
        # BUG FIX: previously the browser process leaked if any fetch or
        # parse raised; quit() now always runs.
        driver.quit()
第一个 for 循环是根据百度的搜索规律,将搜索内容接入后拼接成网址,根据你所需要的页数,把它们放到一个列表中去。随后开启 selenium,自动打开火狐浏览器,依次遍历上面的网址列表,也就是得到每一个网址所对应的一级页面,将该页面的源代码通过 page_source 方法下载下来,交由 spider 去解析和存储。
def spider(myPage):
    """Parse one Baidu result page and append the extracted entries to the output file.

    Thin pipeline wrapper: Page_Level() does the HTML parsing, FileSave()
    the persistence.
    """
    FileSave(Page_Level(myPage))
def Page_Level(myPage):
    """Parse one Baidu search-result page (first-level page).

    Args:
        myPage: HTML source of a Baidu result page.

    Returns:
        list[str]: one formatted entry per result, containing the raw
        ``data-tools`` attribute (a JSON string with title and link) and
        the abstract text.
    """
    dom = etree.HTML(myPage)
    results = []
    # Each direct <div> child of #content_left is one search result.
    channel_names = dom.xpath('//*[@id="content_left"]/div')
    for channel in channel_names:
        summary = channel.xpath('div[@class="c-abstract"]/text()')
        # data-tools carries the result's title and real URL as JSON.
        title_link = channel.xpath('div[@class="f13"]/div[1]/@data-tools')
        # BUG FIX: was a bare `except: pass`, which silently swallowed
        # every exception (including KeyboardInterrupt/SystemExit).
        # Only skip results that genuinely lack the data-tools attribute.
        if not title_link:
            continue
        print(title_link[0])
        results.append("【" + title_link[0] + "\n" + str(summary) + "】\n\n")
    return results
利用 xpath 方法提取出文档中每条结果的标题、地址和摘要概述。
def FileSave(results, directory=None, name=None):
    """Append each entry in *results* to ``<directory>/<name>.txt``.

    Args:
        results: iterable of strings; each is written followed by a newline.
        directory: target directory; defaults to the module-level ``save_path``.
        name: base file name without extension; defaults to the module-level
            ``filename``.
    """
    if directory is None:
        directory = save_path
    if name is None:
        name = filename
    # BUG FIX: os.mkdir raised when the parent directory was missing (or the
    # directory already existed); makedirs with exist_ok covers both cases.
    os.makedirs(directory, exist_ok=True)
    path = os.path.join(directory, name + ".txt")
    # 'a+' appends, so repeated runs accumulate results in the same file.
    with open(path, 'a+', encoding='utf-8') as fp:
        for entry in results:
            fp.write("%s\n" % entry)
# -*- coding: utf-8 -*-
"""Download and save Baidu search results page by page using Selenium."""
from selenium import webdriver
from lxml import etree
import os
import time

# Global configuration
save_path = 'D:\\桌面'  # output directory (Windows desktop path)
filename = "结果集"  # default output file name; overwritten in __main__


def FileSave(results, directory=None, name=None):
    """Append each entry in *results* to ``<directory>/<name>.txt``.

    Args:
        results: iterable of strings; each is written followed by a newline.
        directory: target directory; defaults to the module-level ``save_path``.
        name: base file name without extension; defaults to ``filename``.
    """
    if directory is None:
        directory = save_path
    if name is None:
        name = filename
    # BUG FIX: os.mkdir raised when the parent directory was missing (or the
    # directory already existed); makedirs with exist_ok covers both cases.
    os.makedirs(directory, exist_ok=True)
    path = os.path.join(directory, name + ".txt")
    with open(path, 'a+', encoding='utf-8') as fp:
        for entry in results:
            fp.write("%s\n" % entry)


def Page_Level(myPage):
    """Parse one Baidu search-result page (first-level page).

    Returns:
        list[str]: one formatted entry per result, containing the raw
        ``data-tools`` attribute (JSON with title and link) and the abstract.
    """
    dom = etree.HTML(myPage)
    results = []
    # Each direct <div> child of #content_left is one search result.
    for channel in dom.xpath('//*[@id="content_left"]/div'):
        summary = channel.xpath('div[@class="c-abstract"]/text()')
        title_link = channel.xpath('div[@class="f13"]/div[1]/@data-tools')
        # BUG FIX: was a bare `except: pass`; only skip results that
        # genuinely lack the data-tools attribute.
        if not title_link:
            continue
        print(title_link[0])
        results.append("【" + title_link[0] + "\n" + str(summary) + "】\n\n")
    return results


def spider(myPage):
    """Parse one result page and append the extracted entries to the output file."""
    FileSave(Page_Level(myPage))


if __name__ == "__main__":
    key_world = input("请输入您想要搜索的内容:")
    num = int(input("请输入您想要爬取的页数:"))
    filename = input("请输入您想要存储的文件名:")

    # Baidu paginates with &pn=<offset>, 10 results per page.
    urls = [
        "https://www.baidu.com/s?wd=" + key_world
        + "&pn=" + str(page * 10)
        + "&oq=" + key_world
        + "&ie=utf-8&rsv_pq=da6a000c0001b80d&rsv_t=d7f5m3K7D2ij87xFqs1%2FBxTHWIIxqW6xVfayx6TdZRvpTSctGF1ObM1gLKs"
        for page in range(num)
    ]

    driver = webdriver.Firefox()
    try:
        for u in urls:
            driver.get(u)
            myPage = driver.page_source  # rendered HTML of the result page
            spider(myPage)
            time.sleep(2)  # be polite: pause between requests
    finally:
        # BUG FIX: the browser leaked if any page raised; always quit.
        driver.quit()
最终的效果如图
更多爬虫实例,可前往本人码云仓库
https://gitee.com/lk0423/spider