Commit 54e58d4a by Wenchao Chen (parent 8df0597d)

Adding scrapy project
.idea/RespoDL.iml
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
  <component name="TestRunnerService">
    <option name="PROJECT_TEST_RUNNER" value="Unittests" />
  </component>
</module>
.idea/misc.xml
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectLevelVcsManager" settingsEditedManually="false">
    <OptionsSetting value="true" id="Add" />
    <OptionsSetting value="true" id="Remove" />
    <OptionsSetting value="true" id="Checkout" />
    <OptionsSetting value="true" id="Update" />
    <OptionsSetting value="true" id="Status" />
    <OptionsSetting value="true" id="Edit" />
    <ConfirmationsSetting value="0" id="Add" />
    <ConfirmationsSetting value="0" id="Remove" />
  </component>
  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.5.2 (/usr/bin/python3.5)" project-jdk-type="Python SDK" />
</project>
.idea/modules.xml
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/RespoDL.iml" filepath="$PROJECT_DIR$/.idea/RespoDL.iml" />
    </modules>
  </component>
</project>
.idea/vcs.xml
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="$PROJECT_DIR$" vcs="Git" />
  </component>
</project>
RespoDL/items.py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class RespodlItem(scrapy.Item):
    # Fields used by the standard Files/Images pipelines: the spider fills
    # in the *_urls fields, and the pipelines write download results into
    # 'files' and 'images'.
    file_urls = scrapy.Field()
    files = scrapy.Field()
    image_urls = scrapy.Field()
    images = scrapy.Field()
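A minimal usage sketch (not part of this commit): a spider callback would
populate these fields so that the Files/Images pipelines enabled in
settings.py download the referenced resources. The spider name, start URL,
and selectors below are illustrative assumptions.

import scrapy

from RespoDL.items import RespodlItem


class ExampleSpider(scrapy.Spider):
    name = 'example'  # hypothetical spider, for illustration only
    start_urls = ['http://example.com/attachments']

    def parse(self, response):
        item = RespodlItem()
        # FilesPipeline reads 'file_urls' and records results in 'files'.
        item['file_urls'] = [response.urljoin(h)
                             for h in response.css('a::attr(href)').extract()]
        # ImagesPipeline reads 'image_urls' and records results in 'images'.
        item['image_urls'] = [response.urljoin(s)
                              for s in response.css('img::attr(src)').extract()]
        yield item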
RespoDL/pipelines.py
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html


class RespodlPipeline(object):
    # Default no-op pipeline generated by 'scrapy startproject': it passes
    # every item through unchanged.
    def process_item(self, item, spider):
        return item
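Note that this pipeline is not registered in settings.py, so it currently has
no effect. A sketch of the ITEM_PIPELINES entry that would activate it (the
order value 300 is an arbitrary choice):

ITEM_PIPELINES = {
    'RespoDL.pipelines.RespodlPipeline': 300,
}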
RespoDL/settings.py
# -*- coding: utf-8 -*-

# Scrapy settings for RespoDL project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

import os
BOT_NAME = 'RespoDL'
SPIDER_MODULES = ['RespoDL.spiders']
NEWSPIDER_MODULE = 'RespoDL.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36"
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'RespoDL.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'RespoDL.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'scrapy.pipelines.images.ImagesPipeline': 1,
    'scrapy.pipelines.files.FilesPipeline': 2,
}
CUR_DIR = os.path.dirname(os.path.realpath(__file__))
FILES_STORE = os.path.join(CUR_DIR, '..', 'files')
FILES_URLS_FIELD = 'file_urls'
FILES_RESULT_FIELD = 'files'
IMAGES_STORE = os.path.join(CUR_DIR, '..', 'images')
IMAGES_URLS_FIELD = 'image_urls'
IMAGES_RESULT_FIELD = 'images'
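# With these defaults, each download is stored under a path derived from the
# SHA1 hash of its URL (e.g. files/full/<sha1>.<ext>; ImagesPipeline converts
# images to JPEG, so images/full/<sha1>.jpg), and the stored paths are
# recorded in the items' 'files'/'images' fields.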
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
RespoDL/spiders/__init__.py
# Author: Wenchao Chen (陈闻超)
# Version: 1.0

import codecs
import os

import scrapy


class DownloadSpider(scrapy.Spider):
    """Base spider that can persist a page's source to disk."""

    def write_file(self, filename, source):
        # Create any missing intermediate directories, then write the page
        # source as UTF-8 text.
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with codecs.open(filename, 'wb', 'utf-8') as f:
            f.write(source)
RespoDL/spiders/huaianshi.py
# Author: Wenchao Chen (陈闻超)
# Version: 1.0

import re

from scrapy import FormRequest, Request

from RespoDL.spiders import DownloadSpider


class HuaianshiSpider(DownloadSpider):
    name = 'huaianshi'
    start_urls = ['http://222.184.118.98/haportal/jsp/portal/allportalPublicityList.do']

    def build_page_request(self, page):
        # POST back to the listing endpoint to request a specific page.
        return FormRequest(
            'http://222.184.118.98/haportal/jsp/portal/allportalPublicityList.do',
            formdata={'formOrigin': '0', 'page': str(page)},
            headers={'referer': 'http://222.184.118.98/haportal/jsp/portal/allportalPublicityList.do'},
            callback=self.parse)

    def parse(self, response):
        # The pager's first span holds the current page number.
        cur_page = response.css("div.mainbody table tr td span::text").extract_first().strip()
        filename = 'downloads/' + self.name + '/page_' + cur_page + '/' + cur_page
        self.write_file(filename, response.text)
        # Each listing row (after the header row) links to a detail page via
        # an onclick handler that carries a numeric form id.
        rows = response.css("div.list tr")[1:]
        for row in rows:
            cells = row.css("td")
            url = cells.css("a p::attr(onclick)").extract_first()
            url = re.search(r'\d+', url).group(0)
            url = response.urljoin('allportalPublicityView.do?formId=' + url)
            yield Request(url, meta={'page': cur_page}, callback=self.parse_detail)
        # The pager's second span holds the last page number; keep paging
        # until it is reached.
        last_page = response.css("div.mainbody table tr td span::text").extract()[1].strip()
        if int(cur_page) < int(last_page):
            yield self.build_page_request(int(cur_page) + 1)

    def parse_detail(self, response):
        page = response.request.meta['page']
        # Repair the malformed charset attribute so the saved page declares
        # its gb2312 encoding correctly.
        page_source = re.sub(r'charset=gb2312"', 'charset="gb2312"', response.text)
        filename = 'downloads/' + self.name + '/page_' + page + '/' + response.url.split('?')[-1]
        self.write_file(filename, page_source)
scrapy.cfg
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.org/en/latest/deploy.html

[settings]
default = RespoDL.settings

[deploy]
#url = http://localhost:6800/
project = RespoDL
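With the configuration above in place, the crawl would typically be started
from the project root with the standard Scrapy CLI:

scrapy crawl huaianshi

Each listing page and detail page is then written beneath
downloads/huaianshi/page_<n>/ by the write_file helper in the spiders
package.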