File: urllength.py

Package: python-scrapy 2.13.3-1 (Debian, area: main; suites: forky, sid)
File content: 55 lines, 1,544 bytes
"""
Url Length Spider Middleware

See documentation in docs/topics/spider-middleware.rst
"""

from __future__ import annotations

import logging
from typing import TYPE_CHECKING

from scrapy.exceptions import NotConfigured
from scrapy.spidermiddlewares.base import BaseSpiderMiddleware

if TYPE_CHECKING:
    # typing.Self requires Python 3.11
    from typing_extensions import Self

    from scrapy.crawler import Crawler
    from scrapy.http import Request, Response


logger = logging.getLogger(__name__)


class UrlLengthMiddleware(BaseSpiderMiddleware):
    """Spider middleware that drops requests whose URL exceeds URLLENGTH_LIMIT."""

    crawler: Crawler

    def __init__(self, maxlength: int):  # pylint: disable=super-init-not-called
        self.maxlength: int = maxlength

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        maxlength = crawler.settings.getint("URLLENGTH_LIMIT")
        if not maxlength:
            raise NotConfigured
        o = cls(maxlength)
        o.crawler = crawler
        return o

    def get_processed_request(
        self, request: Request, response: Response | None
    ) -> Request | None:
        if len(request.url) <= self.maxlength:
            return request
        logger.info(
            "Ignoring link (url length > %(maxlength)d): %(url)s ",
            {"maxlength": self.maxlength, "url": request.url},
            extra={"spider": self.crawler.spider},
        )
        assert self.crawler.stats
        self.crawler.stats.inc_value(
            "urllength/request_ignored_count", spider=self.crawler.spider
        )
        return None
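
Because from_crawler raises NotConfigured when URLLENGTH_LIMIT is falsy, the
middleware can be tuned, or switched off entirely, from a project's settings.
A minimal sketch (the value 500 is illustrative; Scrapy documents the default
limit as 2083):

    # settings.py -- illustrative values, not a recommendation
    URLLENGTH_LIMIT = 500  # drop requests whose URL exceeds 500 characters
    # URLLENGTH_LIMIT = 0  # any falsy value disables the middleware (NotConfigured)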
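
The filtering decision itself is easy to exercise directly. A quick sketch,
using a MagicMock in place of the crawler so that the long-URL branch can
reach self.crawler.spider and self.crawler.stats (in a real crawl these are
set by from_crawler):

    from unittest.mock import MagicMock

    from scrapy.http import Request
    from scrapy.spidermiddlewares.urllength import UrlLengthMiddleware

    mw = UrlLengthMiddleware(maxlength=40)
    mw.crawler = MagicMock()  # stub; normally assigned in from_crawler

    short = Request("https://example.com/ok")
    long_ = Request("https://example.com/" + "x" * 100)

    assert mw.get_processed_request(short, response=None) is short  # kept
    assert mw.get_processed_request(long_, response=None) is None   # dropped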