# -*- coding: utf-8 -*-
"""
    Python Tests
    ~~~~~~~~~~~~

    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import unittest

from pygments.lexers import PythonLexer, Python3Lexer
from pygments.token import Token
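

def _dump_tokens(lexer, fragment):
    """
    Hypothetical debugging helper, not part of the original suite: prints the
    (token type, value) pairs that ``get_tokens()`` yields, which makes it
    easy to regenerate the expected lists below after a lexer change, e.g.
    ``_dump_tokens(PythonLexer(), fragment)``.
    """
    for token_type, value in lexer.get_tokens(fragment):
        print((token_type, value))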


class PythonTest(unittest.TestCase):
    def setUp(self):
        self.lexer = PythonLexer()

    def test_cls_builtin(self):
        """
        Tests that a ``cls`` argument is tokenized as Token.Name.Builtin.Pseudo.
        """
        fragment = 'class TestClass():\n    @classmethod\n    def hello(cls):\n        pass\n'
        tokens = [
            (Token.Keyword, 'class'),
            (Token.Text, ' '),
            (Token.Name.Class, 'TestClass'),
            (Token.Punctuation, '('),
            (Token.Punctuation, ')'),
            (Token.Punctuation, ':'),
            (Token.Text, '\n'),
            (Token.Text, '    '),
            (Token.Name.Decorator, '@classmethod'),
            (Token.Text, '\n'),
            (Token.Text, '    '),
            (Token.Keyword, 'def'),
            (Token.Text, ' '),
            (Token.Name.Function, 'hello'),
            (Token.Punctuation, '('),
            (Token.Name.Builtin.Pseudo, 'cls'),
            (Token.Punctuation, ')'),
            (Token.Punctuation, ':'),
            (Token.Text, '\n'),
            (Token.Text, '        '),
            (Token.Keyword, 'pass'),
            (Token.Text, '\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
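
    # Note: the Python lexer handles ``self`` the same way as ``cls`` (both
    # match its Name.Builtin.Pseudo rule), so a companion test could reuse
    # this pattern.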


class Python3Test(unittest.TestCase):
    def setUp(self):
        self.lexer = Python3Lexer()

    def testNeedsName(self):
        """
        Tests that '@' (the PEP 465 matrix multiplication operator) is
        recognized as an Operator.
        """
        fragment = u'S = (H @ beta - r).T @ inv(H @ V @ H.T) @ (H @ beta - r)\n'
        tokens = [
            (Token.Name, u'S'),
            (Token.Text, u' '),
            (Token.Operator, u'='),
            (Token.Text, u' '),
            (Token.Punctuation, u'('),
            (Token.Name, u'H'),
            (Token.Text, u' '),
            (Token.Operator, u'@'),
            (Token.Text, u' '),
            (Token.Name, u'beta'),
            (Token.Text, u' '),
            (Token.Operator, u'-'),
            (Token.Text, u' '),
            (Token.Name, u'r'),
            (Token.Punctuation, u')'),
            (Token.Operator, u'.'),
            (Token.Name, u'T'),
            (Token.Text, u' '),
            (Token.Operator, u'@'),
            (Token.Text, u' '),
            (Token.Name, u'inv'),
            (Token.Punctuation, u'('),
            (Token.Name, u'H'),
            (Token.Text, u' '),
            (Token.Operator, u'@'),
            (Token.Text, u' '),
            (Token.Name, u'V'),
            (Token.Text, u' '),
            (Token.Operator, u'@'),
            (Token.Text, u' '),
            (Token.Name, u'H'),
            (Token.Operator, u'.'),
            (Token.Name, u'T'),
            (Token.Punctuation, u')'),
            (Token.Text, u' '),
            (Token.Operator, u'@'),
            (Token.Text, u' '),
            (Token.Punctuation, u'('),
            (Token.Name, u'H'),
            (Token.Text, u' '),
            (Token.Operator, u'@'),
            (Token.Text, u' '),
            (Token.Name, u'beta'),
            (Token.Text, u' '),
            (Token.Operator, u'-'),
            (Token.Text, u' '),
            (Token.Name, u'r'),
            (Token.Punctuation, u')'),
            (Token.Text, u'\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
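
    # Note: PEP 515 underscores are purely visual grouping (1_000_000 ==
    # 1000000), so each underscored literal below should come back as a
    # single number token of the appropriate subtype.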
    def test_pep_515(self):
        """
        Tests that the lexer can parse numeric literals with underscores
        (PEP 515).
        """
        fragments = (
            (Token.Literal.Number.Integer, u'1_000_000'),
            (Token.Literal.Number.Float, u'1_000.000_001'),
            (Token.Literal.Number.Float, u'1_000e1_000j'),
            (Token.Literal.Number.Hex, u'0xCAFE_F00D'),
            (Token.Literal.Number.Bin, u'0b_0011_1111_0100_1110'),
            (Token.Literal.Number.Oct, u'0o_777_123'),
        )
        for token, fragment in fragments:
            tokens = [
                (token, fragment),
                (Token.Text, u'\n'),
            ]
            self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
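

if __name__ == '__main__':
    # Convenience entry point, an addition rather than part of the original
    # Pygments layout (which runs these tests via the project's test runner):
    # lets the module be executed directly with ``python test_python.py``.
    unittest.main()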