File: phystokens.py

# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Better tokenizing for coverage.py."""

from __future__ import annotations

import ast
import io
import keyword
import re
import sys
import token
import tokenize

from typing import Iterable

from coverage import env
from coverage.types import TLineNo, TSourceTokenLines


TokenInfos = Iterable[tokenize.TokenInfo]


def _phys_tokens(toks: TokenInfos) -> TokenInfos:
    """Return all physical tokens, even line continuations.

    tokenize.generate_tokens() doesn't return a token for the backslash that
    continues lines.  This wrapper provides those tokens so that we can
    re-create a faithful representation of the original source.

    Returns the same values as generate_tokens().

    """
    last_line: str | None = None
    last_lineno = -1
    last_ttext: str = ""
    for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
        if last_lineno != elineno:
            if last_line and last_line.endswith("\\\n"):
                # We are at the beginning of a new line, and the last line
                # ended with a backslash.  We probably have to inject a
                # backslash token into the stream. Unfortunately, there's more
                # to figure out.  This code::
                #
                #   usage = """\
                #   HEY THERE
                #   """
                #
                # triggers this condition, but the token text is::
                #
                #   '"""\\\nHEY THERE\n"""'
                #
                # so we need to figure out if the backslash is already in the
                # string token or not.
                inject_backslash = True
                if last_ttext.endswith("\\"):
                    inject_backslash = False
                elif ttype == token.STRING:
                    if "\n" in ttext and ttext.split("\n", 1)[0][-1] == "\\":
                        # It's a multi-line string and the first line ends with
                        # a backslash, so we don't need to inject another.
                        inject_backslash = False
                if inject_backslash:
                    # Figure out what column the backslash is in.
                    ccol = len(last_line.split("\n")[-2]) - 1
                    # Yield the token, with a fake token type.
                    yield tokenize.TokenInfo(
                        99999, "\\\n",
                        (slineno, ccol), (slineno, ccol+2),
                        last_line,
                    )
            last_line = ltext
        if ttype not in (tokenize.NEWLINE, tokenize.NL):
            last_ttext = ttext
        yield tokenize.TokenInfo(ttype, ttext, (slineno, scol), (elineno, ecol), ltext)
        last_lineno = elineno
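

# Illustrative sketch, not part of the upstream module: _phys_tokens() injects
# a synthetic token (fake type 99999) for the backslash line continuation that
# tokenize.generate_tokens() itself omits.
def _demo_phys_tokens() -> None:  # pragma: no cover
    src = "total = 1 + \\\n    2\n"
    texts = [tok.string for tok in _phys_tokens(generate_tokens(src))]
    assert "\\\n" in texts  # the injected continuation token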


def find_soft_key_lines(source: str) -> set[TLineNo]:
    """Helper for finding lines with soft keywords, like match/case lines."""
    soft_key_lines: set[TLineNo] = set()

    for node in ast.walk(ast.parse(source)):
        if sys.version_info >= (3, 10) and isinstance(node, ast.Match):
            soft_key_lines.add(node.lineno)
            for case in node.cases:
                soft_key_lines.add(case.pattern.lineno)
        elif sys.version_info >= (3, 12) and isinstance(node, ast.TypeAlias):
            soft_key_lines.add(node.lineno)

    return soft_key_lines
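

# Illustrative sketch, not part of the upstream module: on Python 3.10+ the
# helper records the line of a `match` statement and of each `case` pattern.
def _demo_find_soft_key_lines() -> None:  # pragma: no cover
    if sys.version_info >= (3, 10):
        src = "match command:\n    case 'quit':\n        pass\n"
        assert find_soft_key_lines(src) == {1, 2}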


def source_token_lines(source: str) -> TSourceTokenLines:
    """Generate a series of lines, one for each line in `source`.

    Each line is a list of pairs, each pair is a token::

        [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]

    Each pair has a token class, and the token text.

    If you concatenate all the token texts, and then join them with newlines,
    you should have your original `source` back, with two differences:
    trailing white space is not preserved, and a final line with no newline
    is indistinguishable from a final line with a newline.

    """

    # Token types treated purely as whitespace; they never become output pairs.
    ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL}
    line: list[tuple[str, str]] = []
    col = 0

    source = source.expandtabs(8).replace("\r\n", "\n")
    tokgen = generate_tokens(source)

    if env.PYBEHAVIOR.soft_keywords:
        soft_key_lines = find_soft_key_lines(source)
    else:
        soft_key_lines = set()

    for ttype, ttext, (sline, scol), (_, ecol), _ in _phys_tokens(tokgen):
        mark_start = True
        # Split on newlines, keeping them as parts, so multi-line tokens
        # (such as triple-quoted strings) yield one output line per physical line.
        for part in re.split("(\n)", ttext):
            if part == "\n":
                yield line
                line = []
                col = 0
                mark_end = False
            elif part == "":
                mark_end = False
            elif ttype in ws_tokens:
                mark_end = False
            else:
                if mark_start and scol > col:
                    line.append(("ws", " " * (scol - col)))
                    mark_start = False
                tok_class = tokenize.tok_name.get(ttype, "xx").lower()[:3]
                if ttype == token.NAME:
                    if keyword.iskeyword(ttext):
                        # Hard keywords are always keywords.
                        tok_class = "key"
                    elif sys.version_info >= (3, 10):   # PYVERSIONS
                        # Need the version_info check to keep mypy from borking
                        # on issoftkeyword here.
                        if env.PYBEHAVIOR.soft_keywords and keyword.issoftkeyword(ttext):
                            # Soft keywords appear at the start of their line.
                            if len(line) == 0:
                                is_start_of_line = True
                            elif (len(line) == 1) and line[0][0] == "ws":
                                is_start_of_line = True
                            else:
                                is_start_of_line = False
                            if is_start_of_line and sline in soft_key_lines:
                                tok_class = "key"
                line.append((tok_class, part))
                mark_end = True
            scol = 0
        if mark_end:
            col = ecol

    if line:
        yield line
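

# Illustrative sketch, not part of the upstream module: each generated line is a
# list of (class, text) pairs, and joining the texts reproduces the source line.
def _demo_source_token_lines() -> None:  # pragma: no cover
    lines = list(source_token_lines("def hello():\n    return 1\n"))
    assert lines[0][0] == ("key", "def")
    assert "".join(text for _, text in lines[0]) == "def hello():"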


def generate_tokens(text: str) -> TokenInfos:
    """A helper around `tokenize.generate_tokens`.

    Originally this was used to cache the results, but it didn't seem to make
    reporting go faster, and caused issues with using too much memory.

    """
    readline = io.StringIO(text).readline
    return tokenize.generate_tokens(readline)


def source_encoding(source: bytes) -> str:
    """Determine the encoding for `source`, according to PEP 263.

    `source` is a byte string: the text of the program.

    Returns a string, the name of the encoding.

    """
    readline = iter(source.splitlines(True)).__next__
    return tokenize.detect_encoding(readline)[0]
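

# Illustrative sketch, not part of the upstream module: a PEP 263 coding
# declaration in the first two lines is honored; otherwise utf-8 is returned.
def _demo_source_encoding() -> None:  # pragma: no cover
    assert source_encoding(b"# -*- coding: latin-1 -*-\nx = 1\n") == "iso-8859-1"
    assert source_encoding(b"x = 1\n") == "utf-8"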