# -*- coding: utf-8 -*-
# Scrapy settings for get_contact_info project
# python3 -m scrapy runspider spiders/mitspider.py -o emails.json -L INFO
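# Any setting in this file can also be overridden for a single run with the
# -s NAME=VALUE option, e.g. (same spider file assumed):
# python3 -m scrapy runspider spiders/mitspider.py -o emails.json -s DEPTH_LIMIT=5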
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings by consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'get_contact_info'
SPIDER_MODULES = ['get_contact_info.spiders']
NEWSPIDER_MODULE = 'get_contact_info.spiders'
# Pass responses with non-2xx status codes through to the spider
HTTPERROR_ALLOW_ALL = True
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'get_contact_info (+http://www.yourdomain.com)'
# Do not obey robots.txt rules (set to True to respect them)
ROBOTSTXT_OBEY = False
# Set the maximum crawl depth for a site
DEPTH_LIMIT = 20
# Set the crawling order via DEPTH_PRIORITY:
# if zero (default), no priority adjustment is made from depth;
# a positive value decreases priority, i.e. higher-depth requests are
# processed later, which is commonly used for breadth-first crawls (BFO);
# a negative value increases priority, i.e. higher-depth requests are
# processed sooner (DFO)
# DEPTH_PRIORITY = 1
# SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleFifoDiskQueue'
# SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.FifoMemoryQueue'
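# Scrapy crawls depth-first (DFO) by default using LIFO queues; a sketch of
# making that explicit (uncommenting the three FIFO lines above switches to
# BFO instead):
# DEPTH_PRIORITY = 0
# SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleLifoDiskQueue'
# SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.LifoMemoryQueue'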
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 32
# Configure a delay for requests to the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay honors only one of the two settings below; if
# CONCURRENT_REQUESTS_PER_IP is non-zero, limits apply per IP rather than
# per domain
CONCURRENT_REQUESTS_PER_DOMAIN = 16
CONCURRENT_REQUESTS_PER_IP = 16
# Cookies are enabled by default; keep them on for this crawl
COOKIES_ENABLED = True
# The Telnet console is enabled by default; keep it on
TELNETCONSOLE_ENABLED = True
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#    'Accept-Language': 'en',
#}
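# Headers can also be set per request inside the spider; a minimal sketch:
# yield scrapy.Request(url, headers={'Accept-Language': 'en'}, callback=self.parse)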
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'get_contact_info.middlewares.GetContactInfoSpiderMiddleware': 543,
#}
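# A minimal sketch of what such a spider middleware could look like
# (hypothetical; it would live in get_contact_info/middlewares.py):
#
# class GetContactInfoSpiderMiddleware:
#     def process_spider_output(self, response, result, spider):
#         # Pass every scraped item/request through unchanged
#         for item_or_request in result:
#             yield item_or_request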
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'get_contact_info.middlewares.MyCustomDownloaderMiddleware': 543,
#}
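# A hypothetical downloader middleware sketch (also middlewares.py);
# returning None lets the request continue down the middleware chain:
#
# class MyCustomDownloaderMiddleware:
#     def process_request(self, request, spider):
#         spider.logger.debug('Fetching %s', request.url)
#         return None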
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    'get_contact_info.pipelines.GetContactInfoPipeline': 300,
#}
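# A sketch of such a pipeline (hypothetical; it would live in
# get_contact_info/pipelines.py), dropping scraped items without an email:
#
# from scrapy.exceptions import DropItem
#
# class GetContactInfoPipeline:
#     def process_item(self, item, spider):
#         if not item.get('email'):
#             raise DropItem('missing email')
#         return item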
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received
AUTOTHROTTLE_DEBUG = True
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
HTTPCACHE_ENABLED = True
# Cached responses older than 30 seconds are re-downloaded (0 = never expire)
HTTPCACHE_EXPIRATION_SECS = 30
HTTPCACHE_DIR = 'httpcache'
HTTPCACHE_IGNORE_HTTP_CODES = []
HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
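# To avoid caching error responses, list their status codes instead of the
# empty list above, e.g.:
# HTTPCACHE_IGNORE_HTTP_CODES = [500, 502, 503, 504]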