Crawling books site-wide from Suning.com (蘇寧易購) with Scrapy

Analysis: item["h_3"] holds the URL of the first product-list page for each book category; the URL pattern was found by capturing the JSON/XHR requests behind the category page and printing item["h_3"] to check the links for the different kinds of books. Each list page embeds its pagination state in inline JavaScript, so the regexes param.pageNumbers=(.*?); and param.currentPage =(.*?); recover the total page count and the current page. When the current page is below the total, there is a next page, and the page number at the end of item["h_3"] is incremented by 1 to request it.
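Before the full spider, here is a minimal, self-contained sketch of that pagination check (the body string below is a made-up stand-in for the inline JavaScript of a real list page):

import re

# Hypothetical fragment of the inline JavaScript found on a Suning list page.
body = "var param = {}; param.pageNumbers=93; param.currentPage =2;"

page_count = int(re.findall(r"param.pageNumbers=(.*?);", body)[0])     # -> 93
current_page = int(re.findall(r"param.currentPage =(.*?);", body)[0])  # -> 2

if current_page < page_count:
    print("next page to request:", current_page + 1)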
# -*- coding: utf-8 -*-
import re
from copy import deepcopy
import scrapy


class SnSpider(scrapy.Spider):
    name = 'sn2'
    allowed_domains = ['suning.com']
    start_urls = ['https://book.suning.com/']

    def parse(self, response):
        li_list = response.xpath("//div[@class='menu-list']//dl")
        for li in li_list:
            item = {}
            item["b_cate"] = li.xpath(".//dt/h3/a/text()").extract_first()  #    
            a_list = li.xpath("./dd/a")  #    
            for a in a_list:
                item["s_href"] = a.xpath("./@href").extract_first()  #       
                item["s_cate"] = a.xpath("./text()").extract_first()  #      
                item["h_2"] = item["s_href"][26:32]
                item["h_3"] = "http://list.suning.com/emall/showProductList.do?ci=" + item["h_2"] + '&pg=03&cp=2'
                print(item["h_3"])

                if item["h_3"] is not None:
                    print(item["h_3"])
                    yield scrapy.Request(item["h_3"], callback=self.parse_book_list, meta={"item": item})


    def parse_book_list(self, response):
        item = response.meta["item"]
        # print(response.body.encode())
        li_list = response.xpath("//ul[@class='clearfix']/li")
        for li in li_list:
            item["book_commit"] = li.xpath(".//div[@class='res-info']/p[3]/a/text()[1]").extract_first()  # review count
            item["book_desc"] = li.xpath(".//div[@class='res-info']/p[2]/a/em/text()").extract_first()  # short description
            # item["book_price"] = li.xpath(".//div[@class='res-info']/p/em[1]").extract_first()  # price
            item["book_name"] = li.xpath(".//div[@class='res-img']//a[@target='_blank']/img/@alt").extract_first()  # book title
            item["book_image"] = li.xpath(
                ".//div[@class='res-img']//a[@target='_blank']/@href").extract_first()  # link to the book's detail page
            if item["book_image"] is not None:
                item["book_image"] = "https:" + item["book_image"]
                yield scrapy.Request(item["book_image"], callback=self.parse_book_detail, meta={"item": deepcopy(item)})

        # The total page count and the current page live in the page's inline JavaScript.
        body = response.body.decode()
        page_count = int(re.findall(r"param.pageNumbers=(.*?);", body)[0])
        current_page = int(re.findall(r"param.currentPage =(.*?);", body)[0])
        if current_page < page_count:
            # item["h_3"] still holds its first-page value ("...cp=2") from meta, so
            # dropping the trailing character leaves "...cp=" to append the next page to.
            next_url = item["h_3"][:-1] + '{}'.format(current_page + 1)
            yield scrapy.Request(
                next_url,
                callback=self.parse_book_list,
                meta={"item": response.meta["item"]}
            )

    def parse_book_detail(self, response):
        item = response.meta["item"]
        book_detail = response.xpath("//ul[@class='bk-publish clearfix']/li").extract()
        # Join the <li> fragments and strip whitespace so one regex pass can match each label.
        book2 = "".join(book_detail).replace("\n", "").replace("\r", "").replace("\t", "").replace(" ", "")
        # The Chinese labels and end anchors were garbled in the original post; 作者 (author),
        # 出版社 (publisher) and 出版时间 (publish date) are reconstructed from the field names,
        # with "<" assumed as the end anchor (the start of the next HTML tag).
        item["book_author"] = re.findall(r"作者:(.*?)<", book2)
        item["book_author"] = item["book_author"][0] if len(item["book_author"]) > 0 else None
        item["book_press"] = re.findall(r"出版社:(.*?)<", book2)
        item["book_press"] = item["book_press"][0] if len(item["book_press"]) > 0 else None
        item["publish_time"] = re.findall(r"出版时间:(.*?)<", book2)
        item["publish_time"] = item["publish_time"][0] if len(item["publish_time"]) > 0 else None
        print(item)

To run the spider, enter:
scrapy crawl sn2
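
If you swap the final print(item) for yield item, Scrapy's built-in feed exporter can write the results straight to a file (books.json is just an example filename):

scrapy crawl sn2 -o books.json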