File: defaultheaders.py

Package: python-scrapy 2.13.3-1
"""
DefaultHeaders downloader middleware

See documentation in docs/topics/downloader-middleware.rst
"""

from __future__ import annotations

from typing import TYPE_CHECKING

from scrapy.utils.python import without_none_values

if TYPE_CHECKING:
    from collections.abc import Iterable

    # typing.Self requires Python 3.11
    from typing_extensions import Self

    from scrapy import Request, Spider
    from scrapy.crawler import Crawler
    from scrapy.http import Response


class DefaultHeadersMiddleware:
    def __init__(self, headers: Iterable[tuple[str, str]]):
        self._headers: Iterable[tuple[str, str]] = headers

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
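        # Entries whose value is None are dropped by without_none_values(),
        # so a default header can be disabled by setting it to None in
        # DEFAULT_REQUEST_HEADERS.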
        headers = without_none_values(crawler.settings["DEFAULT_REQUEST_HEADERS"])
        return cls(headers.items())

    def process_request(
        self, request: Request, spider: Spider
    ) -> Request | Response | None:
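        # setdefault() only fills in headers the request does not already
        # carry, so headers set explicitly on a Request take precedence.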
        for k, v in self._headers:
            request.headers.setdefault(k, v)
        return None
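
# Usage sketch: the headers applied by this middleware come from the
# DEFAULT_REQUEST_HEADERS setting. A project could override it in its
# settings.py; the values below are illustrative, not the library defaults:
#
#     DEFAULT_REQUEST_HEADERS = {
#         "Accept": "text/html,application/xhtml+xml",
#         "Accept-Language": "en",
#         "Referer": None,  # a None value removes the header entirely
#     }
#
# Because process_request() uses setdefault(), any header already present on
# a Request keeps its value.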