Python crawler-Scrapy crawling technology blog post

Create project

$ scrapy startproject ArticleSpider
You can start your first spider with:

    cd ArticleSpider
    scrapy genspider example example.com

Create a crawler

Create the jobbole crawler with scrapy genspider:

 $ cd ArticleSpider
 $ scrapy genspider jobbole blog.jobbole.com
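
genspider fills in spiders/jobbole.py from the basic spider template; the generated file should look roughly like this (the exact contents vary slightly by Scrapy version):

import scrapy


class JobboleSpider(scrapy.Spider):
    name = 'jobbole'
    allowed_domains = ['blog.jobbole.com']
    start_urls = ['http://blog.jobbole.com/']

    def parse(self, response):
        pass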

Create main.py

import sys
import os
from scrapy.cmdline import execute

# Put the project root on sys.path so the ArticleSpider package can be imported
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# Equivalent to running "scrapy crawl jobbole" on the command line
execute(['scrapy', 'crawl', 'jobbole'])

Disable robots.txt compliance in settings.py:

ROBOTSTXT_OBEY = False
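
Depending on how aggressively the target site rate-limits, it can also help to slow the crawl down and send a browser-like user agent. A possible addition to settings.py (the values here are illustrative assumptions, not from the original post):

# Optional politeness settings (example values)
DOWNLOAD_DELAY = 1
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13) AppleWebKit/537.36'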

The project directory structure is now:

├── ArticleSpider
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── __init__.cpython-36.pyc
│   │   └── settings.cpython-36.pyc
│   ├── items.py
│   ├── middlewares.py
│   ├── pipelines.py
│   ├── settings.py
│   └── spiders
│       ├── __init__.py
│       ├── __pycache__
│       │   ├── __init__.cpython-36.pyc
│       │   └── jobbole.cpython-36.pyc
│       └── jobbole.py
├── main.py
└── scrapy.cfg

XPath syntax

Expression      Description
article         Selects all child elements named article
/article        Selects the root element article
article/a       Selects all a elements that are children of article
//div           Selects all div elements, no matter where they appear in the document
article//div    Selects all div elements that are descendants of article, no matter where they appear under it
//@class        Selects all attributes named class
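
A quick way to see these expressions in action is Scrapy's Selector class; a minimal sketch on a hand-written HTML snippet (the snippet is made up for illustration):

from scrapy import Selector

html = """
<article>
  <a class="archive-title" href="/post/1">First post</a>
  <div class="excerpt"><div>nested</div></div>
</article>
"""
sel = Selector(text=html)
print(sel.xpath("//article/a/text()").extract_first())  # 'First post'
print(sel.xpath("//article//div/@class").extract())     # ['excerpt']
print(sel.xpath("//@class").extract())                  # ['archive-title', 'excerpt']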

Write the crawler

Edit spiders/jobbole.py to parse the article list and follow pagination:

import datetime

import scrapy
from scrapy.http import Request
from ArticleSpider.items import ArticlespiderItem


class JobboleSpider(scrapy.Spider):
    name = 'jobbole'
    allowed_domains = ['blog.jobbole.com']
    start_urls = ['http://blog.jobbole.com/all-posts/']

    def parse(self, response):
        # Each post on the list page is a div under #archive whose class starts with "post"
        item_list = response.xpath("//div[@id='archive']/div[starts-with(@class,'post')]")
        for item in item_list:
            article = ArticlespiderItem()
            article['url'] = item.xpath(".//a[@class='archive-title']/@href").extract_first()
            article['title'] = item.xpath(".//a[@class='archive-title']/text()").extract_first()
            article['desc'] = item.xpath(".//span[@class='excerpt']/p[1]/text()").extract_first()
            article['thumb'] = item.xpath(".//div[@class='post-thumb']//img/@src").extract_first()
            date_items = item.xpath(".//p/text()").extract()
            # The date is the second text node, e.g. " 2018/06/12 · "
            if len(date_items) >= 2:
                create_date = date_items[1].strip().replace(" ·", "")
                try:
                    create_time = datetime.datetime.strptime(create_date, '%Y/%m/%d').date()
                except Exception:
                    create_time = datetime.datetime.now().date()
                article['date'] = create_time
            yield article

        # Follow the "next page" link until the last page is reached
        next_url = response.xpath("//a[@class='next page-numbers']/@href").extract_first()
        if next_url:
            yield Request(url=next_url, callback=self.parse)
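
Before running the full crawl, the XPath expressions can be tested interactively with scrapy shell:

$ scrapy shell http://blog.jobbole.com/all-posts/
>>> response.xpath("//div[@id='archive']/div[starts-with(@class,'post')]")
>>> response.xpath("//a[@class='next page-numbers']/@href").extract_first()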

Create Item

Define the item fields in items.py:

import scrapy


class ArticlespiderItem(scrapy.Item):
    # define the fields for your item here like:
    title = scrapy.Field()
    url = scrapy.Field()
    desc = scrapy.Field()
    date = scrapy.Field()
    thumb = scrapy.Field()
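
An Item behaves like a dict with a fixed set of keys, which is why the spider can assign article['title'] and the pipeline can read it back. A quick illustration:

from ArticleSpider.items import ArticlespiderItem

article = ArticlespiderItem(title='Hello Scrapy', url='http://blog.jobbole.com/1/')
article['desc'] = 'A short excerpt'
print(article['title'])  # Hello Scrapy
print(dict(article))     # plain dict of the populated fields
# article['author'] = 'x'  # would raise KeyError: the field is not declared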

Data storage

Write a MySQL pipeline in pipelines.py:

import pymysql


class MysqlDBPipeline(object):
    def __init__(self):
        self.conn = pymysql.connect(host='localhost', user='baxiang', password='123456',
                                    port=3306, database='spider')
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        sql = 'INSERT INTO article(title,url,create_date,content,thumb) VALUES(%s,%s,%s,%s,%s)'
        self.cursor.execute(sql, (item['title'], item['url'], item['date'], item['desc'], item['thumb']))
        self.conn.commit()
        return item

    def close_spider(self, spider):
        # Scrapy calls close_spider automatically when the spider finishes
        self.conn.close()
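
The pipeline assumes an article table already exists in the spider database. A possible schema, created once with pymysql (the column types are assumptions matched to the inserted fields, not taken from the original post):

import pymysql

DDL = """
CREATE TABLE IF NOT EXISTS article (
    id INT AUTO_INCREMENT PRIMARY KEY,
    title VARCHAR(255),
    url VARCHAR(512),
    create_date DATE,
    content TEXT,
    thumb VARCHAR(512)
)
"""

conn = pymysql.connect(host='localhost', user='baxiang', password='123456',
                       port=3306, database='spider')
with conn.cursor() as cursor:
    cursor.execute(DDL)
conn.commit()
conn.close()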

Modify settings

ITEM_PIPELINES = {
    'ArticleSpider.pipelines.ArticlespiderPipeline': 300,
    'ArticleSpider.pipelines.MysqlDBPipeline': 299,
}

Lower numbers run first, so MysqlDBPipeline (299) processes each item before ArticlespiderPipeline (300), the stub class that startproject generated in pipelines.py.

Crawler execution

Run the main.py created earlier (equivalent to scrapy crawl jobbole from the project root) to start the crawl.
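
For a quick check of the parse logic without MySQL, Scrapy can also export the scraped items straight to a file:

$ scrapy crawl jobbole -o articles.json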
Reference: https://cloud.tencent.com/developer/article/1438478