First, the required environment. (I used Python 2; you can choose Python 3 instead and work through any issues you hit yourself. I am currently crawling several million records with this setup.)
Environment: Python 2.7.10, Scrapy 1.5.0
Third-party libraries:
PyMySQL==0.8.0
Scrapy==1.5.0
pytesseract==0.2.0
pip==10.0.1
Pillow==5.1.0
logger==1.4
bs4==0.0.1
requests==2.18.4

Create the project:
scrapy startproject mytest

Create the spider:
cd mytest
scrapy genspider name XXX.com
The code is pasted directly below; the points that need particular attention are called out in the comments.
# -*- coding: utf-8 -*-
import scrapy
import pytesseract                 # captcha recognition library
from PIL import Image              # captcha image processing
from scrapy.http import Request
from yishi.items import YishiItem  # crawl fields defined in items
from yishi.settings import MYSQL_HOST, MYSQL_DBNAME, MYSQL_USER, MYSQL_PASSWD  # database config from settings
import pymysql                     # database connection
import logging                     # logging
# logging setup
log_filename = '../static/data/info.log'
logging.basicConfig(filename=log_filename, filemode='a', level=logging.INFO)
class CreditSpider(scrapy.Spider):
    name = 'name'
    baseURL = 'https://xxx.com'
    #start_urls = ''
    # headers: open the page in a browser and copy the request headers here
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive',
        'Host': 'xxx',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'xxx',
    }
    # database connection
    connect = pymysql.connect(
        host=MYSQL_HOST,
        db=MYSQL_DBNAME,
        user=MYSQL_USER,
        passwd=MYSQL_PASSWD,
        charset='utf8',
        use_unicode=True)

    # override start_requests
    def start_requests(self):
        return [Request(self.baseURL + 'xxx',
                        headers=self.headers,
                        callback=self.parse,
                        # Scrapy deduplicates request URLs (RFPDupeFilter); dont_filter exempts this URL from deduplication
                        dont_filter=True,
                        )]

    # first request the page
    def parse(self, response):
        # fetch one row at a time: the conditions needed for the search list
        cursor = self.connect.cursor()
        sql = 'select id,xxx,xxx,xxx from xxx where xxx order by id limit 1'
        cursor.execute(sql)
        res = cursor.fetchall()
        if res:
            # parameters the site expects: the search conditions
            data = {
                "xxx": res[0][1],
                "xxx": '',
                "xxx": '',
                "xxx": res[0][2],
                "xxx": '',
                "xxx": '',
                "xxx": '',
            }
            cursor.close()
            return scrapy.Request(self.baseURL + '/xxx/captcha',  # captcha image URL
                                  headers=self.headers,
                                  # pass this request's parameters on to the next request; cookies and the like are kept
                                  meta={'data': data, 'dr_id': res[0][0], 'static': res[0][3], 'len': len(res)},
                                  callback=self.creditRes,
                                  dont_filter=True)
        else:
            # stop the spider once the condition table has no matching rows; refresh the condition table before each run
            print 'Finished!'

    # second request: save the captcha image
    def creditRes(self, response):
        # save the captcha
        captchaFile = '../static/images/code/captcha.png'
        with open(captchaFile, 'wb') as f:
            f.write(response.body)
        try:
            # recognize the captcha with pytesseract
            image = Image.open(captchaFile)
            captcha_value = pytesseract.image_to_string(image)
            print 'Captcha: ' + captcha_value
        except:
            # captcha recognition failed: start over from the beginning
            logging.info('Failed to get the captcha')
            return self.start_requests()
        # use the recognized captcha as a request parameter
        data = response.meta.get("data")
        data["validCode"] = captcha_value
        return [scrapy.FormRequest(
            url=self.baseURL + 'xxx',
            # request again with the full set of parameters to fetch the data
            formdata=data,
            method='GET',
            # carry along the parameters needed later to save data or update the status
            meta={'dr_id': response.meta.get("dr_id"),
                  'static': response.meta.get("static"),
                  'len': response.meta.get("len"),
                  'captcha_value': captcha_value},
            headers=self.headers,
            callback=self.creditdata,
            dont_filter=True,
        )]

    def creditdata(self, response):
        # read the captcha error message to see whether validation succeeded
        code_data = response.xpath("//span[@class='error']")
        if code_data:
            code = code_data.xpath(".//text()").extract()[0]
            logging.info('Captcha validation failed, captcha: ' + str(response.meta.get("captcha_value")))
        else:
            code = ''
        # when the captcha is wrong the status is not updated, so the row will be crawled again
        dr_id = response.meta.get("dr_id")
        # no captcha error: update the status and insert the data
        if code.strip() not in (u'验证码错误', u'验证码不能为空'):  # "captcha wrong" / "captcha must not be empty"
            cursor = self.connect.cursor()
            sql = 'update xxx set status=%s where id=%s' % (1, dr_id)
            cursor.execute(sql)
            self.connect.commit()
            cursor.close()
        else:
            # captcha failed: do not update the status
            logging.info('Captcha error')
        node_list = response.xpath("//table[@id='formresult']/tbody/tr")
        # status: 0 = not yet crawled, 1 = already crawled
        logging.info('Current condition-table id: ' + str(dr_id))
        if node_list:
            for node in node_list:
                item = YishiItem()
                item['xxx'] = dr_id
                item['xxx'] = node.xpath(".//td[1]/text()").extract()[0]
                item['xxx'] = node.xpath(".//td[2]/text()").extract()[0]
                item['xxx'] = node.xpath(".//td[3]/text()").extract()[0]
                item['xxx'] = node.xpath(".//td[4]/text()").extract()[0]
                item['xxx'] = node.xpath(".//td[5]/text()").extract()[0]
                item['xxx'] = node.xpath(".//td[6]/text()").extract()[0]
                item['xxx'] = node.xpath(".//td[7]/text()").extract()[0]
                yield item
        # pagination: if the "next" button is not disabled, follow its link to crawl the next page
        nextDisabled = response.xpath("//a[@class='disable' and @class='next']")
        if not len(nextDisabled):
            url = response.xpath("//a[@class='next']/@href").extract()[0]
            yield scrapy.Request(self.baseURL + '/' + url, callback=self.creditdata)
        # rows with status=0 decide whether to keep crawling: if this batch returned data, go back to parse for the next row
        res_len = response.meta.get("len")
        if not res_len == 0:
            yield scrapy.Request(self.baseURL + 'xxx',
                                 headers=self.headers,
                                 callback=self.parse,
                                 dont_filter=True)

items definition:
xxx = scrapy.Field()
xxx = scrapy.Field()
...

I will not go into the pipeline that writes the data to the database; implement it according to your own business needs (a rough sketch follows below).
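For reference only, here is a minimal sketch of what such a pipeline could look like. It is not the author's actual pipeline; the table name, column names, and item keys (the xxx placeholders) must be replaced to match your own schema, and the connection settings are read from settings.py the same way the spider above does.

# pipelines.py -- minimal sketch; table, columns and item keys are placeholders
import pymysql
from yishi.settings import MYSQL_HOST, MYSQL_DBNAME, MYSQL_USER, MYSQL_PASSWD

class YishiPipeline(object):
    def open_spider(self, spider):
        # open one connection per spider run
        self.connect = pymysql.connect(
            host=MYSQL_HOST, db=MYSQL_DBNAME,
            user=MYSQL_USER, passwd=MYSQL_PASSWD,
            charset='utf8', use_unicode=True)
        self.cursor = self.connect.cursor()

    def process_item(self, item, spider):
        # parameterized insert; 'xxx' stands in for your real table and columns
        sql = 'insert into xxx (dr_id, col1, col2) values (%s, %s, %s)'
        self.cursor.execute(sql, (item['xxx'], item['xxx'], item['xxx']))
        self.connect.commit()
        return item

    def close_spider(self, spider):
        self.cursor.close()
        self.connect.close()

Remember to enable the pipeline in settings.py, e.g. ITEM_PIPELINES = {'yishi.pipelines.YishiPipeline': 300} (the module path depends on your project name).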
Note: the captcha on my target site is fairly simple, so pytesseract can be used directly and the recognition rate is above 95%; other recognition approaches also work. A sample captcha:
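If the raw image does not recognize reliably, a little PIL preprocessing before calling pytesseract often helps. The snippet below is only a sketch and not part of the original spider; the binarization threshold of 140 is an assumption that has to be tuned for the actual captcha.

# optional captcha preprocessing sketch; the threshold 140 is a guess to tune per site
from PIL import Image
import pytesseract

def recognize_captcha(path):
    image = Image.open(path).convert('L')                   # convert to grayscale
    image = image.point(lambda p: 255 if p > 140 else 0)    # simple black/white binarization
    return pytesseract.image_to_string(image).strip()

print(recognize_captcha('../static/images/code/captcha.png'))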
Reference: http://www.pythonsite.com/?p=358
Personally, I feel that writing this with requests.get() is a bit simpler, and I have tested that approach locally, but the business requirements called for Scrapy. With requests, the main point is the line session = requests.session(): it keeps the captcha request and the data request in the same session. If anything is unclear, you can join the QQ group and we can work through it together.
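To illustrate that point, here is a rough sketch of the requests variant (not the author's code); all URLs, headers, and form field names are placeholders, only validCode is carried over from the spider above. The whole flow runs through one requests session, so the captcha request and the search request share the same cookies.

# -*- coding: utf-8 -*-
# sketch of the requests alternative; URLs and form fields are placeholders
import requests
import pytesseract
from PIL import Image

session = requests.session()   # the key line: one session object keeps the cookies

# 1. fetch the captcha image inside the session
resp = session.get('https://xxx.com/xxx/captcha', headers={'User-Agent': 'xxx'})
with open('captcha.png', 'wb') as f:
    f.write(resp.content)

# 2. recognize it
captcha_value = pytesseract.image_to_string(Image.open('captcha.png')).strip()

# 3. submit the search with the same session, so the captcha matches the server-side cookie
params = {'xxx': 'search condition', 'validCode': captcha_value}
result = session.get('https://xxx.com/xxx', params=params, headers={'User-Agent': 'xxx'})
print(result.text)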