File: test_fstring.py

# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
#
# Modifications:
# Copyright David Halter and Contributors
# Modifications are dual-licensed: MIT and PSF.
# 99% of the code is different from pgen2, now.
#
# A fork of Parso's tokenize test
# https://github.com/davidhalter/parso/blob/master/test/test_tokenize.py
#
# The following changes were made:
# - Convert base test to UnitTest
# - Remove grammar-specific tests
# pyre-unsafe
from libcst._parser.parso.python.tokenize import tokenize
from libcst._parser.parso.utils import parse_version_string
from libcst.testing.utils import data_provider, UnitTest


class ParsoTokenizeTest(UnitTest):
    @data_provider(
        (
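            # Each case pairs source code with the expected (line, column)
            # start_pos of every token the tokenizer emits (lines are
            # 1-based, columns 0-based).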
            # 2 times 2 tokens, 5 in total because of the python expr and the endmarker.
            ('f"}{"', [(1, 0), (1, 2), (1, 3), (1, 4), (1, 5)]),
            (
                'f" :{ 1 : } "',
                [
                    (1, 0),
                    (1, 2),
                    (1, 4),
                    (1, 6),
                    (1, 8),
                    (1, 9),
                    (1, 10),
                    (1, 11),
                    (1, 12),
                    (1, 13),
                ],
            ),
            (
                'f"""\n {\nfoo\n }"""',
                [(1, 0), (1, 4), (2, 1), (3, 0), (4, 1), (4, 2), (4, 5)],
            ),
        )
    )
    def test_tokenize_start_pos(self, code, positions):
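        """Tokenize each source string and compare every token's
        (line, column) start_pos against the expected positions."""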
        tokens = list(tokenize(code, version_info=parse_version_string("3.6")))
        assert positions == [p.start_pos for p in tokens]