"""Crochet-based blocking API for Scrapy."""
import crochet
import six
from scrapy import signals
from scrapy.crawler import Crawler
from scrapy.http import Request
from scrapy.utils.log import log_scrapy_info
from scrapy.utils.project import get_project_settings
from scrapy.utils.spider import DefaultSpider
DEFAULT_TIMEOUT = 3600
default_settings = {
'DOWNLOAD_TIMEOUT': 30,
'RETRY_TIMES': 1,
'TELNETCONSOLE_ENABLED': False,
'LOG_LEVEL': 'ERROR',
}
setup = crochet.setup
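# Note (not part of the original module): ``setup`` re-exports
# ``crochet.setup``, which starts the Twisted reactor in a daemon thread;
# that background reactor is what lets the helpers below block until a
# result is available. Call it once per process before using ``fetch``,
# ``crawl`` or ``run_spider``, e.g.:
#
#     setup()

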
def fetch(url, **kwargs):
    """Fetches a URL and returns the response.

    Parameters
    ----------
    url : str
        A URL to fetch.
    spider_cls : scrapy.Spider (default: DefaultSpider)
        A spider class to be used in the crawler instance.
    capture_items : bool (default: True)
        If enabled, the scraped items are captured and returned.
    return_crawler : bool (default: False)
        If enabled, the crawler instance is returned. If ``capture_items`` is
        enabled, the scraped items are collected in ``crawler.items``.
    settings : dict, optional
        Custom crawler settings.
    timeout : int (default: DEFAULT_TIMEOUT)
        Result wait timeout.

    Returns
    -------
    out : Response or None
        A ``Response`` instance if the crawler was able to retrieve a
        response, otherwise ``None``.

    Raises
    ------
    crochet.TimeoutError

    """
    timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT)
    # The crawler instance is needed to read back the captured response.
    kwargs['return_crawler'] = True
    crawler = wait_for(timeout, _fetch_in_reactor, url, **kwargs)
    # The spider only has a ``response`` attribute if a response was received.
    if hasattr(crawler.spider, 'response'):
        return crawler.spider.response


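# Usage sketch for ``fetch`` (not part of the original module; assumes
# ``setup()`` has already been called and the URL is reachable):
#
#     response = fetch('http://example.com', timeout=60)
#     if response is not None:
#         print(response.status, len(response.body))

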
def crawl(url, callback, **kwargs):
    """Crawls a URL with the given callback.

    Parameters
    ----------
    url : str
        A URL to crawl.
    callback : callable
        A function to be used as spider callback for the given URL.
    spider_cls : scrapy.Spider (default: DefaultSpider)
        A spider class to be used in the crawler instance.
    capture_items : bool (default: True)
        If enabled, the scraped items are captured and returned.
    return_crawler : bool (default: False)
        If enabled, the crawler instance is returned. If ``capture_items`` is
        enabled, the scraped items are collected in ``crawler.items``.
    settings : dict, optional
        Custom crawler settings.
    timeout : int (default: DEFAULT_TIMEOUT)
        Result wait timeout.

    Returns
    -------
    out
        By default, the scraped items. If ``return_crawler`` is ``True``,
        returns the crawler instance.

    Raises
    ------
    crochet.TimeoutError

    """
    timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT)
    return wait_for(timeout, _crawl_in_reactor, url, callback, **kwargs)


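# Usage sketch for ``crawl`` (the callback below is an illustrative example,
# not part of the original module; assumes ``setup()`` has been called):
#
#     def parse_title(response):
#         yield {'title': response.css('title::text').extract_first()}
#
#     items = crawl('http://example.com', parse_title)

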
def run_spider(spider_cls, **kwargs):
    """Runs a spider and returns the scraped items (by default).

    Parameters
    ----------
    spider_cls : scrapy.Spider
        A spider class to run.
    capture_items : bool (default: True)
        If enabled, the scraped items are captured and returned.
    return_crawler : bool (default: False)
        If enabled, the crawler instance is returned. If ``capture_items`` is
        enabled, the scraped items are collected in ``crawler.items``.
    settings : dict, optional
        Custom crawler settings.
    timeout : int (default: DEFAULT_TIMEOUT)
        Result wait timeout.

    Returns
    -------
    out : list or scrapy.crawler.Crawler
        The scraped items by default, or the crawler instance if
        ``return_crawler`` is ``True``.

    Raises
    ------
    crochet.TimeoutError

    """
    timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT)
    return wait_for(timeout, _run_spider_in_reactor, spider_cls, **kwargs)


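# Usage sketch for ``run_spider`` (the spider below is hypothetical, not part
# of the original module; assumes ``setup()`` has been called):
#
#     import scrapy
#
#     class UrlSpider(scrapy.Spider):
#         name = 'urlspider'
#         start_urls = ['http://example.com']
#
#         def parse(self, response):
#             yield {'url': response.url}
#
#     items = run_spider(UrlSpider, timeout=120)

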
def _fetch_in_reactor(url, spider_cls=DefaultSpider, **kwargs):
    """Fetches a URL and returns the response.

    Parameters
    ----------
    url : str
        A URL to fetch.
    spider_cls : scrapy.Spider (default: DefaultSpider)
        A spider class to be used in the crawler.
    kwargs : dict, optional
        Additional arguments to be passed to ``_run_spider_in_reactor``.

    Returns
    -------
    crochet.EventualResult

    """
    def parse(self, response):
        # Store the response on the spider so that ``fetch`` can read it back.
        self.response = response
    req = Request(url) if isinstance(url, six.string_types) else url
    req.dont_filter = True
    req.meta['handle_httpstatus_all'] = True
    spider_cls = override_start_requests(spider_cls, [req], parse=parse)
    return _run_spider_in_reactor(spider_cls, **kwargs)


def _crawl_in_reactor(url, callback, spider_cls=DefaultSpider, **kwargs):
    """Crawls the given URL with the given callback.

    Parameters
    ----------
    url : str
        The URL to crawl.
    callback : callable
        Function to be used as callback for the request.
    spider_cls : scrapy.Spider (default: DefaultSpider)
        A spider class to be used in the crawler instance.
    kwargs : dict, optional
        Extra arguments to be passed to ``_run_spider_in_reactor``.

    Returns
    -------
    crochet.EventualResult

    """
    spider_cls = override_start_requests(spider_cls, [url], callback)
    return _run_spider_in_reactor(spider_cls, **kwargs)


@crochet.run_in_reactor
def _run_spider_in_reactor(spider_cls, capture_items=True, return_crawler=False,
                           settings=None, **kwargs):
    """Runs the given spider inside the Twisted reactor.

    Parameters
    ----------
    spider_cls : scrapy.Spider
        Spider to run.
    capture_items : bool (default: True)
        If enabled, the scraped items are captured and returned.
    return_crawler : bool (default: False)
        If enabled, the crawler instance is returned. If ``capture_items`` is
        enabled, the scraped items are collected in ``crawler.items``.
    settings : dict, optional
        Custom crawler settings.

    Returns
    -------
    out : crochet.EventualResult
        An eventual result that fires with the scraped items or, if
        ``return_crawler`` is ``True``, with the crawler instance.

    """
    settings = settings or {}
    crawler_settings = get_project_settings().copy()
    crawler_settings.setdict(default_settings)
    crawler_settings.setdict(settings)
    log_scrapy_info(crawler_settings)
    crawler = Crawler(spider_cls, crawler_settings)
    d = crawler.crawl(**kwargs)
    if capture_items:
        # Collect each scraped item via the ``item_scraped`` signal.
        crawler.items = _OutputItems()
        crawler.signals.connect(crawler.items.append, signal=signals.item_scraped)
        d.addCallback(lambda _: crawler.items)
    if return_crawler:
        # This callback runs last, so it overrides the items as the result.
        d.addCallback(lambda _: crawler)
    return d


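# Note (not part of the original module): because of the
# ``crochet.run_in_reactor`` decorator, calling ``_run_spider_in_reactor``
# returns immediately with a ``crochet.EventualResult`` rather than the items;
# ``wait_for`` below is what blocks on it. A sketch, reusing the hypothetical
# ``UrlSpider`` from the earlier example:
#
#     result = _run_spider_in_reactor(UrlSpider)  # crochet.EventualResult
#     items = result.wait(DEFAULT_TIMEOUT)

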
class _OutputItems(list):
    """A list wrapper that allows using ``append`` as a signal listener."""

    def append(self, item):
        # A Python-level override is needed because the built-in
        # ``list.append`` cannot be connected to a Scrapy signal directly.
        super(_OutputItems, self).append(item)


def override_start_requests(spider_cls, start_urls, callback=None, **attrs):
    """Returns a new spider class overriding ``start_requests``.

    This function is useful to replace the start requests of an existing
    spider class at runtime.

    Parameters
    ----------
    spider_cls : scrapy.Spider
        Spider class to be used as base class.
    start_urls : iterable
        Iterable of URLs or ``Request`` objects.
    callback : callable, optional
        Callback for the start URLs.
    attrs : dict, optional
        Additional class attributes.

    Returns
    -------
    out : class
        A subclass of ``spider_cls`` with an overridden ``start_requests``
        method.

    """
    def start_requests():
        for url in start_urls:
            req = Request(url, dont_filter=True) if isinstance(url, six.string_types) else url
            if callback is not None:
                req.callback = callback
            yield req
    # ``start_requests`` takes no arguments, hence the staticmethod wrapper.
    attrs['start_requests'] = staticmethod(start_requests)
    return type(spider_cls.__name__, (spider_cls, ), attrs)


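# Usage sketch for ``override_start_requests`` (hypothetical names, not part
# of the original module):
#
#     def parse_title(response):
#         yield {'title': response.css('title::text').extract_first()}
#
#     PatchedSpider = override_start_requests(
#         DefaultSpider, ['http://example.com'], callback=parse_title)
#     items = run_spider(PatchedSpider)

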
def wait_for(timeout, func, *args, **kwargs):
    """Waits for an eventual result.

    Parameters
    ----------
    timeout : int
        How long to wait, in seconds.
    func : callable
        A function that returns a ``crochet.EventualResult``.
    args : tuple, optional
        Arguments for ``func``.
    kwargs : dict, optional
        Keyword arguments for ``func``.

    Returns
    -------
    out
        The result of ``func``.

    Raises
    ------
    crochet.TimeoutError

    """
    result = func(*args, **kwargs)
    try:
        return result.wait(timeout)
    except crochet.TimeoutError:
        # Cancel the pending result so the crawl does not keep running
        # in the reactor after the caller has given up on it.
        result.cancel()
        raise
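

# Timeout sketch (hypothetical timeout value, not part of the original
# module): on timeout, ``wait_for`` cancels the pending result before
# re-raising, so the caller only needs to handle the exception:
#
#     try:
#         items = wait_for(5, _run_spider_in_reactor, UrlSpider)
#     except crochet.TimeoutError:
#         items = None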