import six
import signal
import logging
import warnings
import sys

from twisted.internet import reactor, defer
from zope.interface.verify import verifyClass, DoesNotImplement

from scrapy.core.engine import ExecutionEngine
from scrapy.resolver import CachingThreadedResolver
from scrapy.interfaces import ISpiderLoader
from scrapy.extension import ExtensionManager
from scrapy.settings import overridden_settings, Settings
from scrapy.signalmanager import SignalManager
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.ossignal import install_shutdown_handlers, signal_names
from scrapy.utils.misc import load_object
from scrapy.utils.log import (
    LogCounterHandler, configure_logging, log_scrapy_info,
    get_scrapy_root_handler, install_scrapy_root_handler)
from scrapy import signals


logger = logging.getLogger(__name__)


class Crawler(object):

    def __init__(self, spidercls, settings=None):
        if isinstance(settings, dict) or settings is None:
            settings = Settings(settings)

        self.spidercls = spidercls
        self.settings = settings.copy()
        self.spidercls.update_settings(self.settings)

        d = dict(overridden_settings(self.settings))
        logger.info("Overridden settings: %(settings)r", {'settings': d})

        self.signals = SignalManager(self)
        self.stats = load_object(self.settings['STATS_CLASS'])(self)

        handler = LogCounterHandler(self, level=self.settings.get('LOG_LEVEL'))
        logging.root.addHandler(handler)
        if get_scrapy_root_handler() is not None:
            # scrapy root handler already installed: update it with new settings
            install_scrapy_root_handler(self.settings)
        # lambda is assigned to Crawler attribute because this way it is not
        # garbage collected after leaving __init__ scope
        self.__remove_handler = lambda: logging.root.removeHandler(handler)
        self.signals.connect(self.__remove_handler, signals.engine_stopped)

        lf_cls = load_object(self.settings['LOG_FORMATTER'])
        self.logformatter = lf_cls.from_crawler(self)
        self.extensions = ExtensionManager.from_crawler(self)

        self.settings.freeze()
        self.crawling = False
        self.spider = None
        self.engine = None

    @property
    def spiders(self):
        if not hasattr(self, '_spiders'):
            warnings.warn("Crawler.spiders is deprecated, use "
                          "CrawlerRunner.spider_loader or instantiate "
                          "scrapy.spiderloader.SpiderLoader with your "
                          "settings.",
                          category=ScrapyDeprecationWarning, stacklevel=2)
            self._spiders = _get_spider_loader(self.settings.frozencopy())
        return self._spiders

    @defer.inlineCallbacks
    def crawl(self, *args, **kwargs):
        assert not self.crawling, "Crawling already taking place"
        self.crawling = True

        try:
            self.spider = self._create_spider(*args, **kwargs)
            self.engine = self._create_engine()
            start_requests = iter(self.spider.start_requests())
            yield self.engine.open_spider(self.spider, start_requests)
            yield defer.maybeDeferred(self.engine.start)
        except Exception:
            # In Python 2 reraising an exception after yield discards
            # the original traceback (see https://bugs.python.org/issue7563),
            # so sys.exc_info() workaround is used.
            # This workaround also works in Python 3, but it is not needed,
            # and it is slower, so in Python 3 we use native `raise`.
            if six.PY2:
                exc_info = sys.exc_info()

            self.crawling = False
            if self.engine is not None:
                yield self.engine.close()

            if six.PY2:
                six.reraise(*exc_info)
            raise

    def _create_spider(self, *args, **kwargs):
        return self.spidercls.from_crawler(self, *args, **kwargs)

    def _create_engine(self):
        return ExecutionEngine(self, lambda _: self.stop())

    @defer.inlineCallbacks
    def stop(self):
        if self.crawling:
            self.crawling = False
            yield defer.maybeDeferred(self.engine.stop)
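

# Note: a Crawler is usually obtained through CrawlerRunner.create_crawler()
# or CrawlerProcess.crawl() below, but it can be driven directly inside an
# already running reactor. A minimal sketch, assuming a user-defined spider
# class MySpider (hypothetical name):
#
#     from twisted.internet import reactor
#
#     crawler = Crawler(MySpider, {'LOG_LEVEL': 'INFO'})
#     d = crawler.crawl()                   # Deferred fired when crawling ends
#     d.addBoth(lambda _: reactor.stop())
#     reactor.run()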


class CrawlerRunner(object):
    """
    This is a convenient helper class that keeps track of, manages and runs
    crawlers inside an already set up Twisted `reactor`_.

    The CrawlerRunner object must be instantiated with a
    :class:`~scrapy.settings.Settings` object.

    This class shouldn't be needed (since Scrapy is responsible for using it
    accordingly) unless writing scripts that manually handle the crawling
    process. See :ref:`run-from-script` for an example.
    """

    crawlers = property(
        lambda self: self._crawlers,
        doc="Set of :class:`crawlers <scrapy.crawler.Crawler>` started by "
            ":meth:`crawl` and managed by this class."
    )

    def __init__(self, settings=None):
        if isinstance(settings, dict) or settings is None:
            settings = Settings(settings)
        self.settings = settings
        self.spider_loader = _get_spider_loader(settings)
        self._crawlers = set()
        self._active = set()

    @property
    def spiders(self):
        warnings.warn("CrawlerRunner.spiders attribute is renamed to "
                      "CrawlerRunner.spider_loader.",
                      category=ScrapyDeprecationWarning, stacklevel=2)
        return self.spider_loader

    def crawl(self, crawler_or_spidercls, *args, **kwargs):
        """
        Run a crawler with the provided arguments.

        It will call the given Crawler's :meth:`~Crawler.crawl` method, while
        keeping track of it so it can be stopped later.

        If `crawler_or_spidercls` isn't a :class:`~scrapy.crawler.Crawler`
        instance, this method will try to create one using this parameter as
        the spider class given to it.

        Returns a deferred that is fired when the crawling is finished.

        :param crawler_or_spidercls: already created crawler, or a spider class
            or spider's name inside the project to create it
        :type crawler_or_spidercls: :class:`~scrapy.crawler.Crawler` instance,
            :class:`~scrapy.spiders.Spider` subclass or string

        :param list args: arguments to initialize the spider

        :param dict kwargs: keyword arguments to initialize the spider
        """
        crawler = self.create_crawler(crawler_or_spidercls)
        return self._crawl(crawler, *args, **kwargs)

    def _crawl(self, crawler, *args, **kwargs):
        self.crawlers.add(crawler)
        d = crawler.crawl(*args, **kwargs)
        self._active.add(d)

        def _done(result):
            self.crawlers.discard(crawler)
            self._active.discard(d)
            return result

        return d.addBoth(_done)

    def create_crawler(self, crawler_or_spidercls):
        """
        Return a :class:`~scrapy.crawler.Crawler` object.

        * If `crawler_or_spidercls` is a Crawler, it is returned as-is.
        * If `crawler_or_spidercls` is a Spider subclass, a new Crawler
          is constructed for it.
        * If `crawler_or_spidercls` is a string, this function finds
          a spider with this name in a Scrapy project (using spider loader),
          then creates a Crawler instance for it.
        """
        if isinstance(crawler_or_spidercls, Crawler):
            return crawler_or_spidercls
        return self._create_crawler(crawler_or_spidercls)

    def _create_crawler(self, spidercls):
        if isinstance(spidercls, six.string_types):
            spidercls = self.spider_loader.load(spidercls)
        return Crawler(spidercls, self.settings)

    def stop(self):
        """
        Simultaneously stops all the crawling jobs taking place.

        Returns a deferred that is fired when they all have ended.
        """
        return defer.DeferredList([c.stop() for c in list(self.crawlers)])

    @defer.inlineCallbacks
    def join(self):
        """
        join()

        Returns a deferred that is fired when all managed :attr:`crawlers` have
        completed their executions.
        """
        while self._active:
            yield defer.DeferredList(self._active)
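

# Typical CrawlerRunner usage from a script, following the run-from-script
# pattern: the caller owns the reactor and stops it when the crawl Deferred
# fires. MySpider is a hypothetical user-defined spider class.
#
#     from twisted.internet import reactor
#     from scrapy.crawler import CrawlerRunner
#     from scrapy.utils.log import configure_logging
#
#     configure_logging()
#     runner = CrawlerRunner()
#     d = runner.crawl(MySpider)
#     d.addBoth(lambda _: reactor.stop())
#     reactor.run()  # blocks until the crawl finishes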


class CrawlerProcess(CrawlerRunner):
    """
    A class to run multiple scrapy crawlers in a process simultaneously.

    This class extends :class:`~scrapy.crawler.CrawlerRunner` by adding support
    for starting a Twisted `reactor`_ and handling shutdown signals, like the
    keyboard interrupt command Ctrl-C. It also configures top-level logging.

    This utility should be a better fit than
    :class:`~scrapy.crawler.CrawlerRunner` if you aren't running another
    Twisted `reactor`_ within your application.

    The CrawlerProcess object must be instantiated with a
    :class:`~scrapy.settings.Settings` object.

    :param install_root_handler: whether to install root logging handler
        (default: True)

    This class shouldn't be needed (since Scrapy is responsible for using it
    accordingly) unless writing scripts that manually handle the crawling
    process. See :ref:`run-from-script` for an example.
    """

    def __init__(self, settings=None, install_root_handler=True):
        super(CrawlerProcess, self).__init__(settings)
        install_shutdown_handlers(self._signal_shutdown)
        configure_logging(self.settings, install_root_handler)
        log_scrapy_info(self.settings)

    def _signal_shutdown(self, signum, _):
        install_shutdown_handlers(self._signal_kill)
        signame = signal_names[signum]
        logger.info("Received %(signame)s, shutting down gracefully. Send again to force",
                    {'signame': signame})
        reactor.callFromThread(self._graceful_stop_reactor)

    def _signal_kill(self, signum, _):
        install_shutdown_handlers(signal.SIG_IGN)
        signame = signal_names[signum]
        logger.info('Received %(signame)s twice, forcing unclean shutdown',
                    {'signame': signame})
        reactor.callFromThread(self._stop_reactor)

    def start(self, stop_after_crawl=True):
        """
        This method starts a Twisted `reactor`_, adjusts its pool size to
        :setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache based
        on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.

        If `stop_after_crawl` is True, the reactor will be stopped after all
        crawlers have finished, using :meth:`join`.

        :param boolean stop_after_crawl: whether to stop the reactor after all
            crawlers have finished
        """
        if stop_after_crawl:
            d = self.join()
            # Don't start the reactor if the deferreds are already fired
            if d.called:
                return
            d.addBoth(self._stop_reactor)

        reactor.installResolver(self._get_dns_resolver())
        tp = reactor.getThreadPool()
        tp.adjustPoolsize(maxthreads=self.settings.getint('REACTOR_THREADPOOL_MAXSIZE'))
        reactor.addSystemEventTrigger('before', 'shutdown', self.stop)
        reactor.run(installSignalHandlers=False)  # blocking call

    def _get_dns_resolver(self):
        if self.settings.getbool('DNSCACHE_ENABLED'):
            cache_size = self.settings.getint('DNSCACHE_SIZE')
        else:
            cache_size = 0
        return CachingThreadedResolver(
            reactor=reactor,
            cache_size=cache_size,
            timeout=self.settings.getfloat('DNS_TIMEOUT')
        )

    def _graceful_stop_reactor(self):
        d = self.stop()
        d.addBoth(self._stop_reactor)
        return d

    def _stop_reactor(self, _=None):
        try:
            reactor.stop()
        except RuntimeError:  # raised if already stopped or in shutdown stage
            pass
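

# Typical CrawlerProcess usage: unlike CrawlerRunner, start() runs the
# reactor itself, so no Twisted plumbing is needed in the calling script.
# MySpider is a hypothetical user-defined spider class.
#
#     process = CrawlerProcess({'USER_AGENT': 'Mozilla/5.0 (compatible; my-bot)'})
#     process.crawl(MySpider)
#     process.start()  # blocks here until all crawling jobs are finished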


def _get_spider_loader(settings):
    """ Get SpiderLoader instance from settings """
    if settings.get('SPIDER_MANAGER_CLASS'):
        warnings.warn(
            'SPIDER_MANAGER_CLASS option is deprecated. '
            'Please use SPIDER_LOADER_CLASS.',
            category=ScrapyDeprecationWarning, stacklevel=2
        )
    cls_path = settings.get('SPIDER_MANAGER_CLASS',
                            settings.get('SPIDER_LOADER_CLASS'))
    loader_cls = load_object(cls_path)
    try:
        verifyClass(ISpiderLoader, loader_cls)
    except DoesNotImplement:
        warnings.warn(
            'SPIDER_LOADER_CLASS (previously named SPIDER_MANAGER_CLASS) does '
            'not fully implement scrapy.interfaces.ISpiderLoader interface. '
            'Please add all missing methods to avoid unexpected runtime errors.',
            category=ScrapyDeprecationWarning, stacklevel=2
        )
    return loader_cls.from_settings(settings.frozencopy())
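

# A custom loader configured via SPIDER_LOADER_CLASS is expected to provide
# scrapy.interfaces.ISpiderLoader; note that the verifyClass() check above
# only warns instead of failing hard. A minimal sketch of a conforming
# loader (MySpiderLoader is a hypothetical example):
#
#     from zope.interface import implementer
#
#     @implementer(ISpiderLoader)
#     class MySpiderLoader(object):
#         @classmethod
#         def from_settings(cls, settings):
#             return cls()
#
#         def load(self, spider_name):
#             raise KeyError('Spider not found: %s' % spider_name)
#
#         def list(self):
#             return []
#
#         def find_by_request(self, request):
#             return []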