File: test_regex_tokenizer.py

Package: python-ptk 1.3.8+dfsg-1 (Debian bullseye, main)

#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
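"""Unit tests for ptk.regex's RegexTokenizer: literal and bracketed
character classes, escapes, repetition exponents, and symbol tokens."""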

import base, unittest

from ptk.regex import TokenizeError, RegexTokenizer, \
     BackslashAtEndOfInputError, UnterminatedClassError, \
     InvalidClassError, InvalidExponentError, \
     CharacterClass, RegexCharacterClass, LitteralCharacterClass, \
     AnyCharacterClass, ExponentToken
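
# Token shape, as exercised by the assertions below: each token exposes a
# .type (e.g. RegexTokenizer.TOK_CLASS) and a .value (e.g.
# LitteralCharacterClass('a') or ExponentToken(1, None)); for instance,
# RegexTokenizer('a+').tokens() yields a class token for 'a' followed by
# an exponent token meaning "one or more".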


class TokenizerTestCase(unittest.TestCase):
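    """Shared base class wrapping RegexTokenizer construction."""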
    def _tokenize(self, regex):
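        """Run RegexTokenizer over ``regex`` and return its tokens as a list."""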
        tokenizer = RegexTokenizer(regex)
        return list(tokenizer.tokens())


class BasicTestCase(TokenizerTestCase):
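    """A stray ']' or '}' with no opening counterpart raises TokenizeError."""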
    def test_close_bracket(self):
        with self.assertRaises(TokenizeError):
            self._tokenize('foo]')

    def test_close_brace(self):
        with self.assertRaises(TokenizeError):
            self._tokenize('foo}')


class ConcatTestCase(TokenizerTestCase):
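    """Concatenation and escapes: each character (escaped or not) becomes
    one literal character-class token."""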
    def test_concat(self):
        t1, t2, t3 = self._tokenize('abc')
        self.assertEqual((t1.type, t1.value), (RegexTokenizer.TOK_CLASS, LitteralCharacterClass('a')))
        self.assertEqual((t2.type, t2.value), (RegexTokenizer.TOK_CLASS, LitteralCharacterClass('b')))
        self.assertEqual((t3.type, t3.value), (RegexTokenizer.TOK_CLASS, LitteralCharacterClass('c')))

    def test_escape(self):
        t1, t2 = self._tokenize(r'\[\n')
        self.assertEqual((t1.type, t1.value), (RegexTokenizer.TOK_CLASS, LitteralCharacterClass('[')))
        self.assertEqual((t2.type, t2.value), (RegexTokenizer.TOK_CLASS, LitteralCharacterClass('\n')))

    def test_error(self):
        with self.assertRaises(BackslashAtEndOfInputError):
            self._tokenize('spam\\')


class RangeTestCase(TokenizerTestCase):
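    r"""Bracketed character classes: explicit sets and ranges, escaped
    members, the \w and \d shorthands, and the any-character dot."""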
    def test_cache(self):
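        # Building two classes from the same pattern should reuse one cached
        # underlying object (presumably the compiled pattern) via _rx.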
        rx1 = RegexCharacterClass('[a-z]')
        rx2 = RegexCharacterClass('[a-z]')
        self.assertTrue(rx1._rx is rx2._rx)

    def test_unterminated(self):
        with self.assertRaises(UnterminatedClassError):
            self._tokenize('[acb')

    def test_invalid(self):
        with self.assertRaises(InvalidClassError):
            self._tokenize('[b-a]')

    def _test_range(self, rx, testin, testout):
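        """Tokenize ``rx``, expect exactly one class token, then check that
        every character in ``testin`` matches and none in ``testout`` does."""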
        tokens = self._tokenize(rx)
        self.assertEqual(len(tokens), 1)
        type_, value, _ = tokens[0]
        self.assertEqual(type_, RegexTokenizer.TOK_CLASS)
        self.assertTrue(isinstance(value, CharacterClass))
        for item in testin:
            self.assertTrue(item in value, '"%s" should match "%s"' % (item, rx))
        for item in testout:
            self.assertFalse(item in value, '"%s" should not match "%s"' % (item, rx))

    def test_simple(self):
        self._test_range('[acb]', ['a', 'b', 'c'], [' ', 'd'])

    def test_range(self):
        self._test_range('[a-d]', ['a', 'b', 'c', 'd'], [' ', 'e'])

    def test_ranges(self):
        self._test_range('[a-cf-h]', ['a', 'b', 'c', 'f', 'g', 'h'], [' ', 'd', 'e', 'i'])

    def test_minus(self):
        self._test_range('[-a-c]', ['-', 'a', 'b', 'c'], [' ', 'd'])

    def test_special(self):
        self._test_range('[a|]', ['a', '|'], [' ', 'b'])

    def test_escape(self):
        self._test_range(r'[a\]]', ['a', ']'], [' ', 'b'])

    def test_escape_start(self):
        self._test_range(r'[\]-^]', [']', '^'], ['a'])

    def test_escape_end(self):
        self._test_range(r'[\\-\]]', ['\\', ']'], ['a'])

    def test_class_w(self):
        self._test_range(r'\w', ['\u00E9'], ['~'])

    def test_class_w_class(self):
        self._test_range(r'[\wa]', ['\u00E9', 'a'], ['~'])

    def test_class_d(self):
        self._test_range(r'\d', ['0'], ['a'])

    def test_any(self):
        tok1, tok2 = self._tokenize('a.')
        self.assertEqual((tok1.type, tok1.value), (RegexTokenizer.TOK_CLASS, LitteralCharacterClass('a')))
        self.assertEqual((tok2.type, tok2.value), (RegexTokenizer.TOK_CLASS, AnyCharacterClass()))


class ExponentTestCase(TokenizerTestCase):
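    """Repetition exponents: {n} exact counts, {m-n} intervals (note the
    '-' separator rather than re's ','), and the * and + shorthands."""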
    def test_invalid(self):
        with self.assertRaises(InvalidExponentError):
            self._tokenize('a{b}')

    def test_invalid_2(self):
        with self.assertRaises(InvalidExponentError):
            self._tokenize('a{1b}')

    def test_invalid_3(self):
        with self.assertRaises(InvalidExponentError):
            self._tokenize('a{1-2b}')

    def test_invalid_end(self):
        with self.assertRaises(InvalidExponentError):
            self._tokenize('a{1-a}')

    def test_unterminated(self):
        with self.assertRaises(InvalidExponentError):
            self._tokenize('a{1')

    def test_unterminated_value(self):
        with self.assertRaises(InvalidExponentError):
            self._tokenize('a{1-}')

    def test_start(self):
        with self.assertRaises(InvalidExponentError):
            self._tokenize('a{-2}')

    def test_invert(self):
        with self.assertRaises(InvalidExponentError):
            self._tokenize('a{2-1}')

    def test_single_value(self):
        t1, t2 = self._tokenize('a{42}')
        self.assertEqual((t1.type, t1.value), (RegexTokenizer.TOK_CLASS, LitteralCharacterClass('a')))
        self.assertEqual((t2.type, t2.value), (RegexTokenizer.TOK_EXPONENT, ExponentToken(42, 42)))

    def test_interval(self):
        t1, t2 = self._tokenize('a{13-15}')
        self.assertEqual((t1.type, t1.value), (RegexTokenizer.TOK_CLASS, LitteralCharacterClass('a')))
        self.assertEqual((t2.type, t2.value), (RegexTokenizer.TOK_EXPONENT, ExponentToken(13, 15)))

    def test_kleene(self):
        t1, t2 = self._tokenize('a*')
        self.assertEqual((t1.type, t1.value), (RegexTokenizer.TOK_CLASS, LitteralCharacterClass('a')))
        self.assertEqual((t2.type, t2.value), (RegexTokenizer.TOK_EXPONENT, ExponentToken(0, None)))

    def test_closure(self):
        t1, t2 = self._tokenize('a+')
        self.assertEqual((t1.type, t1.value), (RegexTokenizer.TOK_CLASS, LitteralCharacterClass('a')))
        self.assertEqual((t2.type, t2.value), (RegexTokenizer.TOK_EXPONENT, ExponentToken(1, None)))


class SymbolTestMixin(object):
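    """Checks that a single-character symbol token is recognized at the
    start, middle and end of a pattern; subclasses set ``symbol`` to a
    (token type, character) pair."""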
    symbol = None  # (token type, character) pair; subclass responsibility

    def test_start(self):
        t1, t2 = self._tokenize(r'{symbol}s'.format(symbol=self.symbol[1]))
        self.assertEqual((t1.type, t1.value), self.symbol)
        self.assertEqual((t2.type, t2.value), (RegexTokenizer.TOK_CLASS, LitteralCharacterClass('s')))

    def test_middle(self):
        t1, t2, t3 = self._tokenize(r's{symbol}e'.format(symbol=self.symbol[1]))
        self.assertEqual((t1.type, t1.value), (RegexTokenizer.TOK_CLASS, LitteralCharacterClass('s')))
        self.assertEqual((t2.type, t2.value), self.symbol)
        self.assertEqual((t3.type, t3.value), (RegexTokenizer.TOK_CLASS, LitteralCharacterClass('e')))

    def test_end(self):
        t1, t2 = self._tokenize(r's{symbol}'.format(symbol=self.symbol[1]))
        self.assertEqual((t1.type, t1.value), (RegexTokenizer.TOK_CLASS, LitteralCharacterClass('s')))
        self.assertEqual((t2.type, t2.value), self.symbol)


class LParenTestCase(SymbolTestMixin, TokenizerTestCase):
    symbol = (RegexTokenizer.TOK_LPAREN, '(')


class RParenTestCase(SymbolTestMixin, TokenizerTestCase):
    symbol = (RegexTokenizer.TOK_RPAREN, ')')


class UnionTestCase(SymbolTestMixin, TokenizerTestCase):
    symbol = (RegexTokenizer.TOK_UNION, '|')


if __name__ == '__main__':
    unittest.main()