File: tokenizer.py

import codecs
import json
import warnings
import re

import pytest

from html5rdf._tokenizer import HTMLTokenizer
from html5rdf import constants, _utils


class TokenizerTestParser:
    def __init__(self, initialState, lastStartTag=None):
        self.tokenizer = HTMLTokenizer
        self._state = initialState
        self._lastStartTag = lastStartTag

    def parse(self, stream, encoding=None, innerHTML=False):
        # pylint:disable=unused-argument
        tokenizer = self.tokenizer(stream, encoding)
        self.outputTokens = []

        tokenizer.state = getattr(tokenizer, self._state)
        if self._lastStartTag is not None:
            tokenizer.currentToken = {"type": "startTag",
                                      "name": self._lastStartTag}

        types = {v: k for k, v in constants.tokenTypes.items()}
        for token in tokenizer:
            getattr(self, 'process%s' % types[token["type"]])(token)

        return self.outputTokens

    def processDoctype(self, token):
        self.outputTokens.append(["DOCTYPE", token["name"], token["publicId"],
                                  token["systemId"], token["correct"]])

    def processStartTag(self, token):
        self.outputTokens.append(["StartTag", token["name"],
                                  token["data"], token["selfClosing"]])

    def processEmptyTag(self, token):
        if token["name"] not in constants.voidElements:
            self.outputTokens.append("ParseError")
        self.outputTokens.append(["StartTag", token["name"], dict(token["data"][::-1])])

    def processEndTag(self, token):
        self.outputTokens.append(["EndTag", token["name"],
                                  token["selfClosing"]])

    def processComment(self, token):
        self.outputTokens.append(["Comment", token["data"]])

    def processSpaceCharacters(self, token):
        self.outputTokens.append(["Character", token["data"]])
        self.processSpaceCharacters = self.processCharacters

    def processCharacters(self, token):
        self.outputTokens.append(["Character", token["data"]])

    def processEOF(self, token):
        pass

    def processParseError(self, token):
        self.outputTokens.append(["ParseError", token["data"]])


def concatenateCharacterTokens(tokens):
    outputTokens = []
    for token in tokens:
        if "ParseError" not in token and token[0] == "Character":
            if (outputTokens and "ParseError" not in outputTokens[-1] and
                    outputTokens[-1][0] == "Character"):
                outputTokens[-1][1] += token[1]
            else:
                outputTokens.append(token)
        else:
            outputTokens.append(token)
    return outputTokens
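
# Example (sketch): adjacent "Character" tokens are merged so the comparison
# does not depend on how the tokenizer happened to chunk the text.
#
#     concatenateCharacterTokens([["Character", "a"], ["Character", "b"],
#                                 ["Comment", "c"], ["Character", "d"]])
#     # -> [["Character", "ab"], ["Comment", "c"], ["Character", "d"]]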


def normalizeTokens(tokens):
    # TODO: convert tests to reflect arrays
    for i, token in enumerate(tokens):
        if token[0] == 'ParseError':
            tokens[i] = token[0]
    return tokens
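
# Example (sketch): received ["ParseError", <data>] entries are collapsed to
# the bare string "ParseError", matching how the expected output in the JSON
# test files spells parse errors.
#
#     normalizeTokens([["ParseError", "error-detail"], ["Character", "x"]])
#     # -> ["ParseError", ["Character", "x"]]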


def tokensMatch(expectedTokens, receivedTokens, ignoreErrorOrder,
                ignoreErrors=False):
    """Test whether the test has passed or failed

    If the ignoreErrorOrder flag is set to true we don't test the relative
    positions of parse errors and non parse errors
    """
    checkSelfClosing = False
    for token in expectedTokens:
        if (token[0] == "StartTag" and len(token) == 4 or
                token[0] == "EndTag" and len(token) == 3):
            checkSelfClosing = True
            break

    if not checkSelfClosing:
        for token in receivedTokens:
            if token[0] == "StartTag" or token[0] == "EndTag":
                token.pop()

    if not ignoreErrorOrder and not ignoreErrors:
        expectedTokens = concatenateCharacterTokens(expectedTokens)
        return expectedTokens == receivedTokens
    else:
        # Sort the tokens into two groups; non-parse errors and parse errors
        tokens = {"expected": [[], []], "received": [[], []]}
        for tokenType, tokenList in zip(list(tokens.keys()),
                                        (expectedTokens, receivedTokens)):
            for token in tokenList:
                if token != "ParseError":
                    tokens[tokenType][0].append(token)
                else:
                    if not ignoreErrors:
                        tokens[tokenType][1].append(token)
            tokens[tokenType][0] = concatenateCharacterTokens(tokens[tokenType][0])
        return tokens["expected"] == tokens["received"]


_surrogateRe = re.compile(r"\\u([0-9A-Fa-f]{4})(?:\\u([0-9A-Fa-f]{4}))?")


def unescape(test):
    def decode(inp):
        """Decode \\uXXXX escapes

        This decodes \\uXXXX escapes, possibly into non-BMP characters when
        two surrogate character escapes are adjacent to each other.
        """
        # This cannot be implemented using the unicode_escape codec
        # because that requires its input be ISO-8859-1, and we need
        # arbitrary unicode as input.
        def repl(m):
            if m.group(2) is not None:
                high = int(m.group(1), 16)
                low = int(m.group(2), 16)
                if 0xD800 <= high <= 0xDBFF and 0xDC00 <= low <= 0xDFFF:
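                    # e.g. "\uD83D\uDE00" combines to
                    # ((0xD83D - 0xD800) << 10) + (0xDE00 - 0xDC00) + 0x10000
                    # == 0x1F600, i.e. U+1F600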
                    cp = ((high - 0xD800) << 10) + (low - 0xDC00) + 0x10000
                    return chr(cp)
                else:
                    return chr(high) + chr(low)
            else:
                return chr(int(m.group(1), 16))
        try:
            return _surrogateRe.sub(repl, inp)
        except ValueError:
            # This occurs when chr throws ValueError, which should
            # only happen for a lone surrogate.
            if _utils.supports_lone_surrogates:
                raise
            return None

    test["input"] = decode(test["input"])
    for token in test["output"]:
        if token == "ParseError":
            continue
        else:
            token[1] = decode(token[1])
            if len(token) > 2:
                # Snapshot the items: the dict is mutated inside the loop.
                for key, value in list(token[2].items()):
                    del token[2][key]
                    token[2][decode(key)] = decode(value)
    return test
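
# Example (sketch): a doubly escaped surrogate pair in the test data decodes
# to a single non-BMP character; the attribute keys and values of any output
# tokens are decoded the same way.
#
#     unescape({"input": "\\ud83d\\ude00", "output": []})["input"]
#     # -> "\U0001f600"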


def _doCapitalize(match):
    return match.group(1).upper()


_capitalizeRe = re.compile(r"\W+(\w)").sub


def capitalize(s):
    s = s.lower()
    s = _capitalizeRe(_doCapitalize, s)
    return s
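
# Example (sketch): state names from the JSON test data are mapped to the
# attribute names that HTMLTokenizer uses for its state methods.
#
#     capitalize("Data state")    # -> "dataState"
#     capitalize("RCDATA state")  # -> "rcdataState"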


class TokenizerFile(pytest.File):
    def collect(self):
        with codecs.open(str(self.fspath), "r", encoding="utf-8") as fp:
            tests = json.load(fp)
        if 'tests' in tests:
            for i, test in enumerate(tests['tests']):
                yield TokenizerTestCollector.from_parent(self, name=str(i), testdata=test)
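
# Hypothetical wiring (not part of this file): TokenizerFile is intended to be
# returned from a pytest_collect_file hook in a conftest.py so the
# html5lib-tests "*.test" JSON files are collected.  The hook name is real
# pytest API; the ".test" suffix and directory name assumed here are
# illustrative.
#
#     def pytest_collect_file(file_path, parent):
#         if file_path.suffix == ".test" and file_path.parent.name == "tokenizer":
#             return TokenizerFile.from_parent(parent, path=file_path)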


class TokenizerTestCollector(pytest.Collector):
    def __init__(self, name, parent=None, config=None, session=None, testdata=None):
        super(TokenizerTestCollector, self).__init__(name, parent, config, session)
        if 'initialStates' not in testdata:
            testdata["initialStates"] = ["Data state"]
        if 'doubleEscaped' in testdata:
            testdata = unescape(testdata)
        self.testdata = testdata

    def collect(self):
        for initialState in self.testdata["initialStates"]:
            initialState = capitalize(initialState)
            item = TokenizerTest.from_parent(self,
                                             name=initialState,
                                             test=self.testdata,
                                             initialState=initialState)
            if self.testdata["input"] is None:
                item.add_marker(pytest.mark.skipif(True, reason="Relies on lone surrogates"))
            yield item


class TokenizerTest(pytest.Item):
    def __init__(self, name, parent, test, initialState):
        super(TokenizerTest, self).__init__(name, parent)
        self.obj = lambda: 1  # this is to hack around skipif needing a function!
        self.test = test
        self.initialState = initialState

    def runtest(self):
        warnings.resetwarnings()
        warnings.simplefilter("error")

        expected = self.test['output']
        if 'lastStartTag' not in self.test:
            self.test['lastStartTag'] = None
        parser = TokenizerTestParser(self.initialState,
                                     self.test['lastStartTag'])
        tokens = parser.parse(self.test['input'])
        received = normalizeTokens(tokens)
        errorMsg = "\n".join(["\n\nInitial state:",
                              self.initialState,
                              "\nInput:", self.test['input'],
                              "\nExpected:", repr(expected),
                              "\nreceived:", repr(tokens)])
        errorMsg = errorMsg
        ignoreErrorOrder = self.test.get('ignoreErrorOrder', False)
        assert tokensMatch(expected, received, ignoreErrorOrder, True), errorMsg

    def repr_failure(self, excinfo):
        traceback = excinfo.traceback
        ntraceback = traceback.cut(path=__file__)
        pytest_ver = getattr(pytest, "version_tuple", ())
        filter_args = (excinfo,) if pytest_ver >= (7, 4, 0) else ()
        excinfo.traceback = ntraceback.filter(*filter_args)

        return excinfo.getrepr(funcargs=True,
                               showlocals=False,
                               style="short", tbfilter=False)