Python爬虫框架scrapy爬取腾讯招聘


Python爬虫框架scrapy爬取腾讯招聘

文章插图
 
创建项目
scrapy startproject tencent
编写items.py
写class TencentItem
import scrapy
class TencentItem(scrapy.Item):
    """Container for one Tencent job posting scraped from hr.tencent.com."""

    # Job title
    positionname = scrapy.Field()
    # Link to the job detail page
    positionlink = scrapy.Field()
    # Job category
    positionType = scrapy.Field()
    # Number of openings
    peopleNum = scrapy.Field()
    # Work location
    workLocation = scrapy.Field()
    # Publish date
    publishTime = scrapy.Field()
创建基础类的爬虫
scrapy genspider tencentPosition "tencent.com"
tencentPosition.py
# -*- coding: utf-8 -*-
import scrapy

from tencent.items import TencentItem


class TencentpositionSpider(scrapy.Spider):
    """Crawl Tencent HR job listings page by page and yield TencentItem objects."""

    name = "tencent"
    allowed_domains = ["tencent.com"]
    url = "http://hr.tencent.com/position.php?&start="
    offset = 0
    start_urls = [url + str(offset)]

    def parse(self, response):
        # Each job row is a <tr> styled 'even' or 'odd'.
        for each in response.xpath("//tr[@class='even'] | //tr[@class='odd']"):
            item = TencentItem()
            # extract_first() returns None for an empty cell instead of the
            # original .extract()[0], which raised IndexError and killed the
            # spider on any malformed row.
            item['positionname'] = each.xpath("./td[1]/a/text()").extract_first()
            item['positionlink'] = each.xpath("./td[1]/a/@href").extract_first()
            item['positionType'] = each.xpath("./td[2]/text()").extract_first()
            item['peopleNum'] = each.xpath("./td[3]/text()").extract_first()
            item['workLocation'] = each.xpath("./td[4]/text()").extract_first()
            item['publishTime'] = each.xpath("./td[5]/text()").extract_first()
            yield item

        # Pagination: after finishing one listing page, advance the offset by
        # 10 (one page of rows) up to 1680 and request the next page, with
        # parse() as the callback.
        if self.offset < 1680:
            self.offset += 10
            yield scrapy.Request(self.url + str(self.offset), callback=self.parse)
# 管道文件
pipelines.py
import json


class TencentPipeline(object):
    """Write each scraped item as one JSON object per line to an output file.

    Fixes over the original:
    - the file is opened in text mode with an explicit UTF-8 encoding;
      the original wrote ``text.encode("utf-8")`` (bytes) into a ``"w"``
      (text-mode) file, which raises TypeError on Python 3;
    - the line terminator is ``",\\n"`` — the original's ``",n"`` was a
      garbled escape that appended a literal ``n`` instead of a newline;
    - the output path is a parameter with the original name as its
      default, so existing usage is unchanged.
    """

    def __init__(self, filename="tencent.json"):
        # Text mode + explicit encoding: json.dumps returns str, not bytes.
        self.filename = open(filename, "w", encoding="utf-8")

    def process_item(self, item, spider):
        # ensure_ascii=False keeps Chinese text human-readable in the file.
        text = json.dumps(dict(item), ensure_ascii=False) + ",\n"
        self.filename.write(text)
        return item

    def close_spider(self, spider):
        # Release the file handle when the spider finishes.
        self.filename.close()
# 在settings文件设置pipelines
# Register the item pipeline; the value 300 is its run order
# (pipelines with lower numbers run earlier).
ITEM_PIPELINES = {
'tencent.pipelines.TencentPipeline': 300,
}
添加请求报头
DEFAULT_REQUEST_HEADERS
settings.py
# Scrapy project settings for the 'tencent' crawler.
BOT_NAME = 'tencent'

SPIDER_MODULES = ['tencent.spiders']
NEWSPIDER_MODULE = 'tencent.spiders'

# Be polite: honour robots.txt and wait 2 seconds between requests.
ROBOTSTXT_OBEY = True
DOWNLOAD_DELAY = 2

# Default headers sent with every request.  The original User-Agent was
# missing the closing ")" of its parenthesised platform comment; fixed here.
DEFAULT_REQUEST_HEADERS = {
    "User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; windows NT 6.1; Trident/5.0;)",
    'Accept': 'text/html,Application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
}

# Register the item pipeline (300 = run order; lower runs earlier).
ITEM_PIPELINES = {
    'tencent.pipelines.TencentPipeline': 300,
}
【Python爬虫框架scrapy爬取腾讯招聘】


    推荐阅读