import functools
import logging
from collections import defaultdict
from inspect import signature
from warnings import warn

from twisted.internet.defer import Deferred, DeferredList
from twisted.python.failure import Failure

from scrapy.settings import Settings
from scrapy.utils.datatypes import SequenceExclude
from scrapy.utils.defer import mustbe_deferred, defer_result
from scrapy.utils.deprecate import ScrapyDeprecationWarning
from scrapy.utils.log import failure_to_exc_info
from scrapy.utils.misc import arg_to_iter
from scrapy.utils.request import request_fingerprint

logger = logging.getLogger(__name__)


class MediaPipeline:

    LOG_FAILED_RESULTS = True

    class SpiderInfo:
        def __init__(self, spider):
self.spider = spider
self.downloading = set()
self.downloaded = {}
self.waiting = defaultdict(list)

    def __init__(self, download_func=None, settings=None):
self.download_func = download_func
self._expects_item = {}
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
resolve = functools.partial(self._key_for_pipe,
base_class_name="MediaPipeline",
settings=settings)
self.allow_redirects = settings.getbool(
resolve('MEDIA_ALLOW_REDIRECTS'), False
)
self._handle_statuses(self.allow_redirects)
# Check if deprecated methods are being used and make them compatible
self._make_compatible()

    def _handle_statuses(self, allow_redirects):
self.handle_httpstatus_list = None
if allow_redirects:
self.handle_httpstatus_list = SequenceExclude(range(300, 400))
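            # Illustrative note on SequenceExclude's inverted membership test:
            # 200 in SequenceExclude(range(300, 400)) is True (the pipeline
            # handles that status itself), while 301 in it is False, so 3xx
            # responses are left to the redirect middleware and get followed.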

    def _key_for_pipe(self, key, base_class_name=None, settings=None):
"""
>>> MediaPipeline()._key_for_pipe("IMAGES")
'IMAGES'
>>> class MyPipe(MediaPipeline):
... pass
>>> MyPipe()._key_for_pipe("IMAGES", base_class_name="MediaPipeline")
'MYPIPE_IMAGES'
"""
class_name = self.__class__.__name__
formatted_key = f"{class_name.upper()}_{key}"
if (
not base_class_name
or class_name == base_class_name
or settings and not settings.get(formatted_key)
):
return key
return formatted_key

    @classmethod
def from_crawler(cls, crawler):
try:
pipe = cls.from_settings(crawler.settings)
except AttributeError:
pipe = cls()
pipe.crawler = crawler
return pipe

    def open_spider(self, spider):
        self.spiderinfo = self.SpiderInfo(spider)

    def process_item(self, item, spider):
info = self.spiderinfo
requests = arg_to_iter(self.get_media_requests(item, info))
dlist = [self._process_request(r, info, item) for r in requests]
        dfd = DeferredList(dlist, consumeErrors=True)
return dfd.addCallback(self.item_completed, item, info)

    def _process_request(self, request, info, item):
fp = request_fingerprint(request)
cb = request.callback or (lambda _: _)
eb = request.errback
request.callback = None
request.errback = None
# Return cached result if request was already seen
if fp in info.downloaded:
return defer_result(info.downloaded[fp]).addCallbacks(cb, eb)
# Otherwise, wait for result
wad = Deferred().addCallbacks(cb, eb)
info.waiting[fp].append(wad)
# Check if request is downloading right now to avoid doing it twice
if fp in info.downloading:
return wad
# Download request checking media_to_download hook output first
info.downloading.add(fp)
dfd = mustbe_deferred(self.media_to_download, request, info, item=item)
dfd.addCallback(self._check_media_to_download, request, info, item=item)
dfd.addBoth(self._cache_result_and_execute_waiters, fp, info)
dfd.addErrback(lambda f: logger.error(
f.value, exc_info=failure_to_exc_info(f), extra={'spider': info.spider})
)
return dfd.addBoth(lambda _: wad) # it must return wad at last
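    # An illustrative walk-through of the de-duplication above (hypothetical
    # scenario): if get_media_requests() yields two Requests with the same
    # fingerprint, the first adds the fingerprint to info.downloading and
    # starts a real download; the second only appends a waiting Deferred,
    # which _cache_result_and_execute_waiters() later fires with the shared
    # result. Requests seen in earlier items are served from info.downloaded.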

    def _make_compatible(self):
"""Make overridable methods of MediaPipeline and subclasses backwards compatible"""
methods = [
"file_path", "media_to_download", "media_downloaded",
"file_downloaded", "image_downloaded", "get_images"
]
for method_name in methods:
method = getattr(self, method_name, None)
if callable(method):
setattr(self, method_name, self._compatible(method))

    def _compatible(self, func):
"""Wrapper for overridable methods to allow backwards compatibility"""
self._check_signature(func)
@functools.wraps(func)
def wrapper(*args, **kwargs):
if self._expects_item[func.__name__]:
return func(*args, **kwargs)
kwargs.pop('item', None)
return func(*args, **kwargs)
return wrapper

    def _check_signature(self, func):
sig = signature(func)
self._expects_item[func.__name__] = True
if 'item' not in sig.parameters:
old_params = str(sig)[1:-1]
new_params = old_params + ", *, item=None"
warn(f'{func.__name__}(self, {old_params}) is deprecated, '
f'please use {func.__name__}(self, {new_params})',
ScrapyDeprecationWarning, stacklevel=2)
self._expects_item[func.__name__] = False
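    # For illustration (hypothetical subclass): an old-style override such as
    #
    #     def file_path(self, request, response=None, info=None): ...
    #
    # triggers the warning above, suggesting
    #
    #     def file_path(self, request, response=None, info=None, *, item=None): ...
    #
    # _expects_item['file_path'] is then set to False, so the _compatible()
    # wrapper strips the 'item' keyword before calling the old method.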

    def _modify_media_request(self, request):
if self.handle_httpstatus_list:
request.meta['handle_httpstatus_list'] = self.handle_httpstatus_list
else:
request.meta['handle_httpstatus_all'] = True

    def _check_media_to_download(self, result, request, info, item):
        if result is not None:
            return result
        if self.download_func:
            # this ugly code was left only to support tests. TODO: remove
            dfd = mustbe_deferred(self.download_func, request, info.spider)
        else:
            self._modify_media_request(request)
            dfd = self.crawler.engine.download(request, info.spider)
        dfd.addCallbacks(
            callback=self.media_downloaded, callbackArgs=(request, info),
            callbackKeywords={'item': item},
            errback=self.media_failed, errbackArgs=(request, info))
        return dfd

    def _cache_result_and_execute_waiters(self, result, fp, info):
if isinstance(result, Failure):
# minimize cached information for failure
result.cleanFailure()
result.frames = []
result.stack = None
            # This code fixes a memory leak by not keeping references to
            # the Request and Response objects in the Media Pipeline cache.
#
# What happens when the media_downloaded callback raises an
# exception, for example a FileException('download-error') when
# the Response status code is not 200 OK, is that the original
# StopIteration exception (which in turn contains the failed
# Response and by extension, the original Request) gets encapsulated
# within the FileException context.
#
# Originally, Scrapy was using twisted.internet.defer.returnValue
# inside functions decorated with twisted.internet.defer.inlineCallbacks,
# encapsulating the returned Response in a _DefGen_Return exception
# instead of a StopIteration.
#
# To avoid keeping references to the Response and therefore Request
# objects on the Media Pipeline cache, we should wipe the context of
            # the encapsulated exception when it is a StopIteration instance.
#
# This problem does not occur in Python 2.7 since we don't have
# Exception Chaining (https://www.python.org/dev/peps/pep-3134/).
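            #
            # A sketch of the chain being broken (hypothetical values):
            #
            #     result.value                    # FileException('download-error')
            #     result.value.__context__        # StopIteration(response)
            #     result.value.__context__.value  # the Response, keeping the
            #                                     # original Request alive
            #
            # Clearing __context__ below drops that whole chain.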
context = getattr(result.value, '__context__', None)
if isinstance(context, StopIteration):
setattr(result.value, '__context__', None)
info.downloading.remove(fp)
info.downloaded[fp] = result # cache result
for wad in info.waiting.pop(fp):
defer_result(result).chainDeferred(wad)

    # Overridable Interface
    def media_to_download(self, request, info, *, item=None):
"""Check request before starting download"""
pass
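    # Hypothetical override sketch: returning a non-None result here makes
    # _check_media_to_download() skip the download entirely, e.g. to reuse a
    # previously stored file (both helpers below are assumed, not part of
    # this class):
    #
    #     def media_to_download(self, request, info, *, item=None):
    #         if self._already_stored(request.url):
    #             return self._stored_result(request.url)
    #         return None  # fall through to a normal download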

    def get_media_requests(self, item, info):
"""Returns the media requests to download"""
pass
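    # Hypothetical override sketch, assuming the item carries a 'file_urls'
    # field (with Request imported from scrapy):
    #
    #     def get_media_requests(self, item, info):
    #         return [Request(url) for url in item.get('file_urls', [])]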

    def media_downloaded(self, response, request, info, *, item=None):
"""Handler for success downloads"""
return response

    def media_failed(self, failure, request, info):
"""Handler for failed downloads"""
return failure
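    # Hypothetical override sketch: a subclass might validate the response
    # here and turn bad statuses into failures (FileException is assumed,
    # as in scrapy.pipelines.files):
    #
    #     def media_downloaded(self, response, request, info, *, item=None):
    #         if response.status != 200:
    #             raise FileException('download-error')
    #         return {'url': request.url, 'status': 'downloaded'}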

    def item_completed(self, results, item, info):
        """Called per item when all media requests have been processed"""
if self.LOG_FAILED_RESULTS:
for ok, value in results:
if not ok:
logger.error(
'%(class)s found errors processing %(item)s',
{'class': self.__class__.__name__, 'item': item},
exc_info=failure_to_exc_info(value),
extra={'spider': info.spider}
)
return item
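    # For illustration, `results` is the DeferredList output: a list of
    # (success, value) tuples, e.g. (hypothetical values):
    #
    #     [(True, {'url': '...', 'path': 'full/abc.jpg'}),
    #      (False, Failure(FileException('download-error')))]
    #
    # so a subclass could attach the successful values to the item:
    #
    #     def item_completed(self, results, item, info):
    #         item['files'] = [value for ok, value in results if ok]
    #         return item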

    def file_path(self, request, response=None, info=None, *, item=None):
"""Returns the path where downloaded media should be stored"""
pass
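    # Hypothetical override sketch: derive a stable storage path from the
    # request URL, similar in spirit to what FilesPipeline does:
    #
    #     def file_path(self, request, response=None, info=None, *, item=None):
    #         from hashlib import sha1
    #         media_guid = sha1(request.url.encode()).hexdigest()
    #         return f'full/{media_guid}'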