創(chuàng)建一個爬蟲項目
scrapy startproject spider_name
構(gòu)建一個爬蟲
scrapy genspider baidu_spider www.baidu.com
運行指定爬蟲文件(不需要項目,直接運行爬蟲 .py 文件)
scrapy runspider 爬蟲文件.py
使爬蟲從停止的地方開始爬取
scrapy crawl 爬蟲名 -s JOBDIR=crawls/爬蟲名
在cmd或者命令行中運行爬蟲
scrapy crawl 爬蟲名
scrapy post請求簡書搜索功能
import scrapy
import json
class JianshuSpider(scrapy.Spider):
    """Spider that POSTs to Jianshu's search API (/search/do) and prints
    the decoded JSON result.

    NOTE(review): the CSRF token and the ``_m7e_session`` cookie below were
    captured from one browser session and will expire — refresh them before
    running this spider.
    """

    # Allow 404 responses through to parse() instead of having the
    # HttpErrorMiddleware drop them.
    handle_httpstatus_list = [404]
    name = 'jianshu'
    allowed_domains = ['www.reibang.com']

    # Request headers copied from a real browser search request.
    headers = {
        "Host": "www.reibang.com",
        "Connection": "keep-alive",
        "Content-Length": "0",
        "Accept": "application/json",
        "Origin": "http://www.reibang.com",
        "X-CSRF-Token": "ftkf0tgVZjazuefhOQIGxF8hErgCVcx6ZzI0rc/gW8fnLXFlCMxvrmynQDnCaxfeSazU8FzkXLnNDKC04P/n1Q==",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
        "Referer": "http://www.reibang.com/search?utf8=%E2%9C%93&q=%E6%9A%B4%E9%9B%B7",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
    }

    # Session cookies copied from the same browser session.
    cookies = {
        "signin_redirect": "https%3A%2F%2Fwww.reibang.com%2Fsearch%3Futf8%3D%25E2%259C%2593%26q%3D%25E6%259A%25B4%25E9%259B%25B7",
        "read_mode": "day",
        "default_font": "font2",
        "locale": "zh-CN",
        "_m7e_session": "ef50c62444a30571485f70fc07580e0d",
        "Hm_lvt_0c0e9d9b1e7d617b3e6842e85b9fb068": "1533108867",
        "Hm_lpvt_0c0e9d9b1e7d617b3e6842e85b9fb068": "1533108867",
        "sajssdk_2015_cross_new_user": "1",
        "sensorsdata2015jssdkcross": "%7B%22distinct_id%22%3A%22164f468d0e73a8-0825d1e6f53621-47e1039-2073600-164f468d0e847a%22%2C%22%24device_id%22%3A%22164f468d0e73a8-0825d1e6f53621-47e1039-2073600-164f468d0e847a%22%2C%22props%22%3A%7B%7D%7D",
    }

    def start_requests(self):
        """Yield the initial POST request to the search endpoint.

        The query parameters (search term, page, ordering) are carried in
        the URL, so the POST has an empty body.
        """
        start_url = (
            'http://www.reibang.com/search/do'
            '?q=%E6%9A%B4%E9%9B%B7&type=note&page=1&order_by=default'
        )
        # Bug fix: plain scrapy.Request accepts no `formdata` argument —
        # the original (commented-out) `formdata=data` would raise
        # TypeError if enabled, and its placeholder dict `{xxx: xxx}` was
        # a NameError.  To send form data, use instead:
        #   yield scrapy.FormRequest(start_url, formdata={...}, ...)
        yield scrapy.Request(
            start_url,
            callback=self.parse,
            headers=self.headers,
            cookies=self.cookies,
            method='POST',
        )

    def parse(self, response):
        """Decode the JSON search response and print it."""
        # response.text replaces the deprecated response.body_as_unicode().
        sites = json.loads(response.text)
        print(sites)
最后編輯于 :
?著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請聯(lián)系作者