# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Better tokenizing for coverage.py."""

import codecs
import keyword
import re
import sys
import token
import tokenize

from coverage import env
from coverage.backward import iternext, unicode_class
from coverage.misc import contract

def phys_tokens(toks):
    """Return all physical tokens, even line continuations.

    tokenize.generate_tokens() doesn't return a token for the backslash that
    continues lines. This wrapper provides those tokens so that we can
    re-create a faithful representation of the original source.

    Returns the same values as generate_tokens()

    """
    last_line = None
    last_lineno = -1
    last_ttext = None
    for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
        if last_lineno != elineno:
            if last_line and last_line.endswith("\\\n"):
                # We are at the beginning of a new line, and the last line
                # ended with a backslash. We probably have to inject a
                # backslash token into the stream. Unfortunately, there's more
                # to figure out. This code::
                #
                #   usage = """\
                #   HEY THERE
                #   """
                #
                # triggers this condition, but the token text is::
                #
                #   '"""\\\nHEY THERE\n"""'
                #
                # so we need to figure out if the backslash is already in the
                # string token or not.
                inject_backslash = True
                if last_ttext.endswith("\\"):
                    inject_backslash = False
                elif ttype == token.STRING:
                    if "\n" in ttext and ttext.split('\n', 1)[0][-1] == '\\':
                        # It's a multi-line string and the first line ends with
                        # a backslash, so we don't need to inject another.
                        inject_backslash = False
                if inject_backslash:
                    # Figure out what column the backslash is in.
                    ccol = len(last_line.split("\n")[-2]) - 1
                    # Yield the token, with a fake token type.
                    yield (
                        99999, "\\\n",
                        (slineno, ccol), (slineno, ccol+2),
                        last_line
                    )
            last_line = ltext
            if ttype not in (tokenize.NEWLINE, tokenize.NL):
                last_ttext = ttext
        yield ttype, ttext, (slineno, scol), (elineno, ecol), ltext
        last_lineno = elineno
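
# An illustrative sketch, not part of coverage.py's API: the helper name
# _demo_phys_tokens is hypothetical. For a backslash-continued line,
# phys_tokens() yields an extra fake token (type 99999, text "\\\n") that
# tokenize.generate_tokens() omits, so the token texts can re-create the
# original source exactly.
def _demo_phys_tokens():
    """Print the physical tokens of a backslash-continued assignment."""
    src = u"x = 1 + \\\n    2\n"
    toks = tokenize.generate_tokens(iternext(src.splitlines(True)))
    for ttype, ttext, start, end, _ in phys_tokens(toks):
        print((ttype, ttext, start, end))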

@contract(source='unicode')
def source_token_lines(source):
    """Generate a series of lines, one for each line in `source`.

    Each line is a list of pairs, each pair is a token::

        [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]

    Each pair has a token class, and the token text.

    If you concatenate all the token texts, and then join them with newlines,
    you should have your original `source` back, with two differences:
    trailing whitespace is not preserved, and a final line with no newline
    is indistinguishable from a final line with a newline.

    """
    ws_tokens = set([token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL])
    line = []
    col = 0

    source = source.expandtabs(8).replace('\r\n', '\n')
    tokgen = generate_tokens(source)

    for ttype, ttext, (_, scol), (_, ecol), _ in phys_tokens(tokgen):
        mark_start = True
        for part in re.split('(\n)', ttext):
            if part == '\n':
                yield line
                line = []
                col = 0
                mark_end = False
            elif part == '':
                mark_end = False
            elif ttype in ws_tokens:
                mark_end = False
            else:
                if mark_start and scol > col:
                    line.append(("ws", u" " * (scol - col)))
                    mark_start = False
                tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
                if ttype == token.NAME and keyword.iskeyword(ttext):
                    tok_class = "key"
                line.append((tok_class, part))
                mark_end = True
            scol = 0
        if mark_end:
            col = ecol

    if line:
        yield line
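
# An illustrative sketch, not part of coverage.py's API: the helper name
# _demo_source_token_lines is hypothetical. It shows the per-line
# (token_class, text) pairs described in the docstring above, e.g.
# "def hello():" becomes
# [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ('op', ')'), ('op', ':')].
def _demo_source_token_lines():
    """Print the classified tokens for a tiny program."""
    src = u"def hello():\n    return 'hi'\n"
    for line in source_token_lines(src):
        print(line)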

class CachedTokenizer(object):
    """A one-element cache around tokenize.generate_tokens.

    When reporting, coverage.py tokenizes files twice, once to find the
    structure of the file, and once to syntax-color it. Tokenizing is
    expensive, and easily cached.

    This is a one-element cache so that our twice-in-a-row tokenizing doesn't
    actually tokenize twice.

    """
    def __init__(self):
        self.last_text = None
        self.last_tokens = None

    @contract(text='unicode')
    def generate_tokens(self, text):
        """A stand-in for `tokenize.generate_tokens`."""
        if text != self.last_text:
            self.last_text = text
            readline = iternext(text.splitlines(True))
            self.last_tokens = list(tokenize.generate_tokens(readline))
        return self.last_tokens

# Create our generate_tokens cache as a callable replacement function.
generate_tokens = CachedTokenizer().generate_tokens
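
# An illustrative sketch, not part of coverage.py's API: the helper name
# _demo_cached_tokenizer is hypothetical. Tokenizing the same text twice in a
# row returns the identical cached list, so the second call does no work.
def _demo_cached_tokenizer():
    """Show that repeated calls with the same text hit the one-element cache."""
    src = u"a = 1\n"
    first = generate_tokens(src)
    second = generate_tokens(src)
    assert first is second  # the same list object, served from the cache
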
COOKIE_RE = re.compile(r"^[ \t]*#.*coding[:=][ \t]*([-\w.]+)", flags=re.MULTILINE)

@contract(source='bytes')
def _source_encoding_py2(source):
    """Determine the encoding for `source`, according to PEP 263.

    `source` is a byte string, the text of the program.

    Returns a string, the name of the encoding.

    """
    assert isinstance(source, bytes)

    # Do this so the detect_encoding code we copied will work.
    readline = iternext(source.splitlines(True))

    # This is mostly code adapted from Py3.2's tokenize module.

    def _get_normal_name(orig_enc):
        """Imitates get_normal_name in tokenizer.c."""
        # Only care about the first 12 characters.
        enc = orig_enc[:12].lower().replace("_", "-")
        if re.match(r"^utf-8($|-)", enc):
            return "utf-8"
        if re.match(r"^(latin-1|iso-8859-1|iso-latin-1)($|-)", enc):
            return "iso-8859-1"
        return orig_enc

    # From detect_encoding():
    # It detects the encoding from the presence of a UTF-8 BOM or an encoding
    # cookie as specified in PEP-0263. If both a BOM and a cookie are present,
    # but disagree, a SyntaxError will be raised. If the encoding cookie is an
    # invalid charset, raise a SyntaxError. Note that if a UTF-8 BOM is found,
    # 'utf-8-sig' is returned.

    # If no encoding is specified, then the default will be returned.
    default = 'ascii'

    bom_found = False
    encoding = None

    def read_or_stop():
        """Get the next source line, or ''."""
        try:
            return readline()
        except StopIteration:
            return ''

    def find_cookie(line):
        """Find an encoding cookie in `line`."""
        try:
            line_string = line.decode('ascii')
        except UnicodeDecodeError:
            return None

        matches = COOKIE_RE.findall(line_string)
        if not matches:
            return None
        encoding = _get_normal_name(matches[0])
        try:
            codec = codecs.lookup(encoding)
        except LookupError:
            # This behavior mimics the Python interpreter.
            raise SyntaxError("unknown encoding: " + encoding)

        if bom_found:
            # codecs in 2.3 were raw tuples of functions, assume the best.
            codec_name = getattr(codec, 'name', encoding)
            if codec_name != 'utf-8':
                # This behavior mimics the Python interpreter.
                raise SyntaxError('encoding problem: utf-8')
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(codecs.BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default

    encoding = find_cookie(first)
    if encoding:
        return encoding

    second = read_or_stop()
    if not second:
        return default

    encoding = find_cookie(second)
    if encoding:
        return encoding

    return default

@contract(source='bytes')
def _source_encoding_py3(source):
    """Determine the encoding for `source`, according to PEP 263.

    `source` is a byte string: the text of the program.

    Returns a string, the name of the encoding.

    """
    readline = iternext(source.splitlines(True))
    return tokenize.detect_encoding(readline)[0]

if env.PY3:
    source_encoding = _source_encoding_py3
else:
    source_encoding = _source_encoding_py2
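
# An illustrative sketch, not part of coverage.py's API: the helper name
# _demo_source_encoding is hypothetical. A PEP 263 coding cookie in the first
# two lines decides the encoding; otherwise the per-version default applies.
def _demo_source_encoding():
    """Show PEP 263 detection on byte strings with and without a cookie."""
    with_cookie = b"# -*- coding: iso-8859-1 -*-\nx = 1\n"
    without_cookie = b"x = 1\n"
    print(source_encoding(with_cookie))     # 'iso-8859-1'
    print(source_encoding(without_cookie))  # 'utf-8' on Python 3, 'ascii' on Python 2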

@contract(source='unicode')
def compile_unicode(source, filename, mode):
    """Just like the `compile` builtin, but works on any Unicode string.

    Python 2's compile() builtin has a stupid restriction: if the source string
    is Unicode, then it may not have an encoding declaration in it. Why not?
    Who knows! It also decodes to utf-8, and then tries to interpret those
    utf-8 bytes according to the encoding declaration. Why? Who knows!

    This function neuters the coding declaration, and compiles the source.

    """
    source = neuter_encoding_declaration(source)
    if env.PY2 and isinstance(filename, unicode_class):
        filename = filename.encode(sys.getfilesystemencoding(), "replace")
    code = compile(source, filename, mode)
    return code
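
# An illustrative sketch, not part of coverage.py's API: the helper name
# _demo_compile_unicode is hypothetical. A Unicode source with a coding
# declaration compiles cleanly because the declaration is neutered first.
def _demo_compile_unicode():
    """Compile and run a Unicode string that carries an encoding declaration."""
    src = u"# -*- coding: utf-8 -*-\nmessage = 'hi'\n"
    code = compile_unicode(src, "<demo>", "exec")
    namespace = {}
    exec(code, namespace)
    print(namespace['message'])  # 'hi'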

@contract(source='unicode', returns='unicode')
def neuter_encoding_declaration(source):
    """Return `source`, with any encoding declaration neutered."""
    if COOKIE_RE.search(source):
        source_lines = source.splitlines(True)
        for lineno in range(min(2, len(source_lines))):
            source_lines[lineno] = COOKIE_RE.sub("# (deleted declaration)", source_lines[lineno])
        source = "".join(source_lines)
    return source
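
# An illustrative sketch, not part of coverage.py's API: the helper name
# _demo_neuter_encoding_declaration is hypothetical. The coding cookie in the
# first two lines is replaced by a harmless comment; everything else is kept.
def _demo_neuter_encoding_declaration():
    """Show an encoding declaration being neutered."""
    src = u"# coding: utf-8\nprint('hi')\n"
    print(neuter_encoding_declaration(src))
    # -> "# (deleted declaration)\nprint('hi')\n"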