File: _http.py

"""HTTP related handlers.

Note that some other HTTP handlers live in more specific modules: _auth.py,
_gzip.py, etc.


Copyright 2002-2006 John J Lee <jjl@pobox.com>

This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
LICENSE included with the distribution).

"""

from __future__ import absolute_import

import logging
import socket
import time
from io import BytesIO

from . import _rfc3986, _sockettimeout
from ._headersutil import is_html
from ._request import Request
from ._response import response_seek_wrapper
from ._urllib2_fork import BaseHandler, HTTPError
from ._equiv import HTTPEquivParser
from .polyglot import create_response_info, RobotFileParser, is_py2, as_unicode

debug = logging.getLogger("mechanize").debug
debug_robots = logging.getLogger("mechanize.robots").debug


def parse_head(fileobj):
    """Return a list of key, value pairs."""
    p = HTTPEquivParser(fileobj.read(4096))
    return p()


class HTTPEquivProcessor(BaseHandler):
    """Append META HTTP-EQUIV headers to regular HTTP headers."""

    handler_order = 300  # before handlers that look at HTTP headers

    def http_response(self, request, response):
        if not hasattr(response, "seek"):
            response = response_seek_wrapper(response)
        http_message = response.info()
        url = response.geturl()
        ct_hdrs = http_message.getheaders("content-type")
        if is_html(ct_hdrs, url, True):
            try:
                try:
                    html_headers = parse_head(response)
                finally:
                    response.seek(0)
            except Exception:
                pass
            else:
                for hdr, val in html_headers:
                    if is_py2:
                        # add a header
                        http_message.dict[hdr.lower()] = val
                        text = hdr + b": " + val
                        for line in text.split(b"\n"):
                            http_message.headers.append(line + b"\n")
                    else:
                        hdr = hdr.decode('iso-8859-1')
                        http_message[hdr] = val.decode('iso-8859-1')
        return response

    https_response = http_response
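
# Usage sketch (comment only; assumes mechanize exports build_opener() and
# HTTPEquivProcessor, mirroring the urllib2 handler API):
#
#   import mechanize
#   opener = mechanize.build_opener(mechanize.HTTPEquivProcessor())
#   response = opener.open("http://example.com/")
#   # A <meta http-equiv="Refresh" ...> tag in the page's HEAD now also
#   # appears as an ordinary Refresh header:
#   response.info().get("refresh")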


class MechanizeRobotFileParser(RobotFileParser):

    def __init__(self, url='', opener=None):
        RobotFileParser.__init__(self, url)
        self._opener = opener
        self._timeout = _sockettimeout._GLOBAL_DEFAULT_TIMEOUT

    def set_opener(self, opener=None):
        from . import _opener
        if opener is None:
            opener = _opener.OpenerDirector()
        self._opener = opener

    def set_timeout(self, timeout):
        self._timeout = timeout

    def read(self):
        """Reads the robots.txt URL and feeds it to the parser."""
        if self._opener is None:
            self.set_opener()
        req = Request(self.url, unverifiable=True, visit=False,
                      timeout=self._timeout)
        try:
            f = self._opener.open(req)
        except HTTPError as err:
            f = err
        except (IOError, socket.error, OSError) as exc:
            debug_robots("ignoring error opening %r: %s" %
                         (self.url, exc))
            return
        lines = []
        line = f.readline()
        while line:
            lines.append(line.strip())
            line = f.readline()
        status = f.code
        if status == 401 or status == 403:
            self.disallow_all = True
            debug_robots("disallow all")
        elif status >= 400:
            self.allow_all = True
            debug_robots("allow all")
        elif status == 200 and lines:
            debug_robots("parse lines")
            if is_py2:
                self.parse(lines)
            else:
                # As per: https://developers.google.com/search/docs/advanced/robots/robots_txt
                # robots.txt must be UTF-8 encoded; bytes that are not
                # valid UTF-8 are ignored
                self.parse((as_unicode(x, errors='ignore') for x in lines))
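
# Rough standalone usage (illustrative; this parser is normally driven by
# HTTPRobotRulesProcessor below rather than used directly):
#
#   rfp = MechanizeRobotFileParser("http://example.com/robots.txt")
#   rfp.read()  # fetches via a default OpenerDirector if none was set
#   rfp.can_fetch("my-agent/1.0", "http://example.com/private/page")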


class RobotExclusionError(HTTPError):

    def __init__(self, request, *args):
        HTTPError.__init__(self, *args)
        self.request = request


class HTTPRobotRulesProcessor(BaseHandler):
    # before redirections, after everything else
    handler_order = 800
    http_response_class = None

    def __init__(self, rfp_class=MechanizeRobotFileParser):
        self.rfp_class = rfp_class
        self.rfp = None
        self._host = None

    def __copy__(self):
        return self.__class__(self.rfp_class)

    def http_request(self, request):
        scheme = request.get_type()
        if scheme not in ["http", "https"]:
            # robots exclusion only applies to HTTP and HTTPS
            return request

        if request.get_selector() == "/robots.txt":
            # /robots.txt is always OK to fetch
            return request

        host = request.get_host()

        # robots.txt requests don't need to be allowed by robots.txt :-)
        origin_req = getattr(request, "_origin_req", None)
        if (origin_req is not None and
                origin_req.get_selector() == "/robots.txt" and
                origin_req.get_host() == host):
            return request

        if host != self._host:
            self.rfp = self.rfp_class()
            try:
                self.rfp.set_opener(self.parent)
            except AttributeError:
                debug("%r instance does not support set_opener" %
                      self.rfp.__class__)
            self.rfp.set_url(scheme + "://" + host + "/robots.txt")
            self.rfp.set_timeout(request.timeout)
            self.rfp.read()
            self._host = host

        ua = request.get_header("User-agent", "")
        if self.rfp.can_fetch(ua, request.get_full_url()):
            return request
        else:
            # XXX This should really have raised URLError.  Too late now...
            factory = self.http_response_class or create_response_info
            msg = b"request disallowed by robots.txt"
            raise RobotExclusionError(
                request,
                request.get_full_url(),
                403, msg,
                factory(BytesIO()), BytesIO(msg))

    https_request = http_request
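
# Since RobotExclusionError subclasses HTTPError, a disallowed fetch surfaces
# as a 403 the caller can catch.  A hedged sketch (handler and exception names
# as exported by mechanize; URL purely illustrative):
#
#   import mechanize
#   opener = mechanize.build_opener(mechanize.HTTPRobotRulesProcessor())
#   try:
#       opener.open("http://example.com/disallowed-by-robots")
#   except mechanize.RobotExclusionError as exc:
#       print(exc.code)  # 403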


class HTTPRefererProcessor(BaseHandler):
    """Add Referer header to requests.

    This only makes sense if you use each HTTPRefererProcessor for a single
    chain of requests (so, for example, if you use one instance to fetch a
    series of URLs extracted from a single page, this will break).

    There's a proper implementation of this in mechanize.Browser.

    """

    def __init__(self):
        self.referer = None

    def http_request(self, request):
        if ((self.referer is not None) and
                not request.has_header("Referer")):
            request.add_unredirected_header("Referer", self.referer)
        return request

    def http_response(self, request, response):
        self.referer = response.geturl()
        return response

    https_request = http_request
    https_response = http_response
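
# Illustrative only: because the handler stores a single shared self.referer,
# it is safe only for one linear chain of requests (see docstring above):
#
#   opener = mechanize.build_opener(HTTPRefererProcessor())
#   opener.open("http://example.com/a")       # no Referer header sent
#   opener.open("http://example.com/a/next")  # Referer: http://example.com/a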


def clean_refresh_url(url):
    # e.g. Firefox 1.5 does (something like) this
    if ((url.startswith('"') and url.endswith('"')) or
            (url.startswith("'") and url.endswith("'"))):
        url = url[1:-1]
    return _rfc3986.clean_url(url, 'utf-8')  # XXX encoding


def parse_refresh_header(refresh):
    """
    >>> parse_refresh_header("1; url=http://example.com/")
    (1.0, 'http://example.com/')
    >>> parse_refresh_header("1; url='http://example.com/'")
    (1.0, 'http://example.com/')
    >>> parse_refresh_header("1")
    (1.0, None)
    >>> parse_refresh_header("blah")  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ValueError: invalid literal for float(): blah

    """

    ii = refresh.find(";")
    if ii != -1:
        pause, newurl_spec = float(refresh[:ii]), refresh[ii + 1:]
        jj = newurl_spec.find("=")
        key = None
        if jj != -1:
            key, newurl = newurl_spec[:jj], newurl_spec[jj + 1:]
            newurl = clean_refresh_url(newurl)
        if key is None or key.strip().lower() != "url":
            raise ValueError()
    else:
        pause, newurl = float(refresh), None
    return pause, newurl


class HTTPRefreshProcessor(BaseHandler):
    """Perform HTTP Refresh redirections.

    Note that if a non-200 HTTP code has occurred (for example, a 30x
    redirect), this processor will do nothing.

    By default, only zero-time Refresh headers are redirected.  Use the
    max_time attribute / constructor argument to allow Refresh with longer
    pauses.  Use the honor_time attribute / constructor argument to control
    whether the requested pause is honoured (with a time.sleep()) or
    skipped in favour of immediate redirection.

    Public attributes:

    max_time: see above
    honor_time: see above

    """
    handler_order = 1000

    def __init__(self, max_time=0, honor_time=True):
        self.max_time = max_time
        self.honor_time = honor_time
        self._sleep = time.sleep

    def __copy__(self):
        return self.__class__(self.max_time, self.honor_time)

    def http_response(self, request, response):
        code, msg, hdrs = response.code, response.msg, response.info()

        if code == 200 and 'refresh' in hdrs:
            refresh = hdrs.getheaders("refresh")[0]
            try:
                pause, newurl = parse_refresh_header(refresh)
            except ValueError:
                debug("bad Refresh header: %r" % refresh)
                return response

            if newurl is None:
                newurl = response.geturl()
            if (self.max_time is None) or (pause <= self.max_time):
                if pause > 1E-3 and self.honor_time:
                    self._sleep(pause)
                hdrs["location"] = newurl
                # hardcoded http is NOT a bug
                response = self.parent.error(
                    "http", request, response,
                    "refresh", msg, hdrs)
            else:
                debug("Refresh header ignored: %r" % refresh)

        return response

    https_response = http_response
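
# Configuration sketch: with max_time=None every Refresh header is followed,
# and honor_time=False skips the requested pause instead of sleeping through
# it (semantics per the class docstring; URL purely illustrative):
#
#   processor = HTTPRefreshProcessor(max_time=None, honor_time=False)
#   opener = mechanize.build_opener(processor)
#   opener.open("http://example.com/page-with-refresh-header")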