Using Scrapy to crawl data and store it in MongoDB.


As an example, the site being crawled here is http://readcolor.com.
The goal is to extract the title of each book list, the number of books it contains, and its summary.
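The steps below assume a Scrapy project named duyuan (the name used in the imports throughout). Such a project can be created with scrapy startproject duyuan, which generates the items.py, settings.py, and pipelines.py files edited in the following steps.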
(1) The items file (items.py)
import scrapy

class DuyuanItem(scrapy.Item):
    # define the fields for your item here, e.g.: name = scrapy.Field()
    book_list_title = scrapy.Field()
    book_number = scrapy.Field()
    book_list_author = scrapy.Field()
    book_list_date = scrapy.Field()
    book_list_summary = scrapy.Field()
    book_url = scrapy.Field()
    book_name = scrapy.Field()
    book_author = scrapy.Field()
    book_summary = scrapy.Field()
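A quick illustration of how such an item behaves (not part of the project files; the field names come from the class above). Items support dict-style access and convert cleanly to plain dicts, which is what the pipeline in step (3) relies on:

item = DuyuanItem()
item['book_list_title'] = 'Example list'
item['book_number'] = '10'
print(dict(item))  # {'book_list_title': 'Example list', 'book_number': '10'}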
(2) The settings file (settings.py)
ROBOTSTXT_OBEY = False  # do not obey robots.txt

ITEM_PIPELINES = {
    'duyuan.pipelines.DuyuanPipeline': 300,  # register the pipeline defined in step (3)
}

MONGODB_HOST = '127.0.0.1'
MONGODB_PORT = 27017
MONGODB_DBNAME = 'duyuan'
MONGODB_DOCNAME = 'bookitem'  # MongoDB collection name
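Before running anything, it can help to confirm that MongoDB is actually reachable with these settings. A standalone check using pymongo (not part of the Scrapy project; the host and port are the values configured above):

import pymongo

client = pymongo.MongoClient(host='127.0.0.1', port=27017)
print(client.server_info()['version'])  # raises an exception if the server is unreachable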
(3) The pipeline file (pipelines.py)
import pymongo
from scrapy.conf import settings  # works on old Scrapy versions; see the variant below

class DuyuanPipeline(object):
    def __init__(self):
        host = settings['MONGODB_HOST']
        port = settings['MONGODB_PORT']
        db_name = settings['MONGODB_DBNAME']
        client = pymongo.MongoClient(host=host, port=port)
        db = client[db_name]
        self.post = db[settings['MONGODB_DOCNAME']]

    def process_item(self, item, spider):
        book_info = dict(item)  # Scrapy items convert directly to dicts
        self.post.insert(book_info)  # on newer pymongo, use insert_one()
        return item
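Note that scrapy.conf was removed in newer Scrapy releases. On current versions, the same pipeline is usually written with the from_crawler hook, which receives the settings through the crawler object. A minimal sketch of that variant, using the same setting names as above:

import pymongo

class DuyuanPipeline(object):
    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy calls this hook and hands us the project settings
        return cls(
            host=crawler.settings.get('MONGODB_HOST'),
            port=crawler.settings.get('MONGODB_PORT'),
            db_name=crawler.settings.get('MONGODB_DBNAME'),
            doc_name=crawler.settings.get('MONGODB_DOCNAME'),
        )

    def __init__(self, host, port, db_name, doc_name):
        self.client = pymongo.MongoClient(host=host, port=port)
        self.post = self.client[db_name][doc_name]

    def process_item(self, item, spider):
        self.post.insert_one(dict(item))
        return item

    def close_spider(self, spider):
        self.client.close()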
(4) The spider file
import scrapy
from duyuan.items import DuyuanItem

class ReadcolorSpider(scrapy.Spider):
    name = "readcolor"
    allowed_domains = ["readcolor.com"]
    start_urls = ['http://readcolor.com/lists']
    url = 'http://readcolor.com'

    def parse(self, response):
        book_list_group = response.xpath('//article[@style="margin:10px 0 20px;"]')
        for each in book_list_group:
            item = DuyuanItem()  # instantiate the item
            # xpath() returns a list of matches; extract()[0] takes the first one
            item['book_list_title'] = each.xpath('header/h3/a/text()').extract()[0]
            item['book_number'] = each.xpath('p/a/text()').extract()[0]
            book_list_url = each.xpath('header/h3/a/@href').extract()[0]
            # yield (not return) keeps parse() a generator; the relative URL is
            # joined to the site root, and callback handles the detail page
            yield scrapy.Request(self.url + book_list_url, callback=self.parse_book_list_detail,
                                 dont_filter=True, meta={'item': item})

    def parse_book_list_detail(self, response):
        # the partially filled item is passed along through meta
        item = response.meta['item']
        summary = response.xpath('//div[@id="list-description"]/p/text()').extract()
        item['book_list_summary'] = '\n'.join(summary)
        yield item
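Finally, run the spider from the project root with scrapy crawl readcolor. The stored documents can then be inspected with pymongo; an ad-hoc check, assuming the database and collection names configured in step (2):

import pymongo

client = pymongo.MongoClient('127.0.0.1', 27017)
for doc in client['duyuan']['bookitem'].find().limit(3):
    print(doc['book_list_title'], doc['book_number'])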