from __future__ import annotations
import argparse
import dataclasses as dc
import json
import logging
import sys
import token
from abc import ABC, abstractmethod
from argparse import Namespace
from enum import Enum
from functools import cached_property
from pathlib import Path
from tokenize import generate_tokens, TokenInfo
from typing import Any, Iterator, Sequence
from typing_extensions import Never
EMPTY_TOKENS = {
token.COMMENT,
token.DEDENT,
token.ENCODING,
token.INDENT,
token.NEWLINE,
token.NL,
}
BRACKETS = {"{": "}", "(": ")", "[": "]"}
BRACKETS_INV = {j: i for i, j in BRACKETS.items()}
def is_name(t: TokenInfo, *names: str) -> bool:
    return t.type == token.NAME and (not names or t.string in names)
def is_op(t: TokenInfo, *names: str) -> bool:
    return t.type == token.OP and (not names or t.string in names)
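# Illustrative usage (hypothetical tokens, not exercised here directly): with no
# names these helpers match any token of the given type; with names the token's
# string must also match, e.g. is_name(t, "from", "import") or is_op(t, "(", "[").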
class LintSeverity(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
@dc.dataclass
class LintMessage:
"""This is a datatype representation of the JSON that gets sent to lintrunner
as described here:
https://docs.rs/lintrunner/latest/lintrunner/lint_message/struct.LintMessage.html
"""
code: str
name: str
severity: LintSeverity
char: int | None = None
description: str | None = None
line: int | None = None
original: str | None = None
path: str | None = None
replacement: str | None = None
asdict = dc.asdict
@dc.dataclass
class LintResult:
"""LintResult is a single result from a linter.
Like LintMessage but the .length member allows you to make specific edits to
one location within a file, not just replace the whole file.
Linters can generate recursive results - results that contain other results.
For example, the annotation linter would find two results in this code sample:
index = Union[Optional[str], int]
And the first result, `Union[Optional[str], int]`, contains the second one,
`Optional[str]`, so the first result is recursive but the second is not.
If --fix is selected, the linter does a cycle of tokenizing and fixing all
the non-recursive edits until no edits remain.
"""
name: str
line: int | None = None
char: int | None = None
replacement: str | None = None
length: int | None = None # Not in LintMessage
description: str | None = None
original: str | None = None
is_recursive: bool = False # Not in LintMessage
@property
def is_edit(self) -> bool:
return None not in (self.char, self.length, self.line, self.replacement)
def apply(self, lines: list[str]) -> bool:
if self.line is None:
return False
line = lines[self.line - 1]
if self.char is None:
return False
before = line[: self.char]
if self.length is None:
return False
after = line[self.char + self.length :]
lines[self.line - 1] = f"{before}{self.replacement}{after}"
return True
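    # Illustrative sketch of apply() with hypothetical values: given
    # lines == ["x = 1\n"] and a result with line=1, char=4, length=1,
    # replacement="2", lines[0] becomes "x = 2\n".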
def as_message(self, code: str, path: str) -> LintMessage:
d = dc.asdict(self)
d.pop("is_recursive")
d.pop("length")
if self.is_edit:
            # This is one of our own edits, which we don't want to
            # send to lintrunner as a replacement
d["replacement"] = None
return LintMessage(code=code, path=path, severity=LintSeverity.ERROR, **d)
def sort_key(self) -> tuple[int, int, str]:
line = -1 if self.line is None else self.line
char = -1 if self.char is None else self.char
return line, char, self.name
class ParseError(ValueError):
def __init__(self, token: TokenInfo, *args: str) -> None:
super().__init__(*args)
self.token = token
@classmethod
def check(cls, cond: Any, token: TokenInfo, *args: str) -> None:
if not cond:
raise cls(token, *args)
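# Illustrative usage (taken from bracket_pairs below): ParseError.check(stack, t,
# "Never opened") raises ParseError(t, "Never opened") when stack is empty (falsy).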
class ArgumentParser(argparse.ArgumentParser):
"""
Adds better help formatting and default arguments to argparse.ArgumentParser
"""
def __init__(
self,
prog: str | None = None,
usage: str | None = None,
description: str | None = None,
epilog: str | None = None,
is_fixer: bool = False,
**kwargs: Any,
) -> None:
super().__init__(prog, usage, description, None, **kwargs)
self._epilog = epilog
help = "A list of files or directories to lint"
self.add_argument("files", nargs="*", help=help)
# TODO(rec): get fromfile_prefix_chars="@", type=argparse.FileType to work
help = "Fix lint errors if possible" if is_fixer else argparse.SUPPRESS
self.add_argument("-f", "--fix", action="store_true", help=help)
help = "Run for lintrunner and print LintMessages which aren't edits"
self.add_argument("-l", "--lintrunner", action="store_true", help=help)
help = "Run for test, print all LintMessages"
self.add_argument("-t", "--test", action="store_true", help=help)
help = "Print more debug info"
self.add_argument("-v", "--verbose", action="store_true", help=help)
def exit(self, status: int = 0, message: str | None = None) -> Never:
"""
Overriding this method is a workaround for argparse throwing away all
line breaks when printing the `epilog` section of the help message.
"""
argv = sys.argv[1:]
        if self._epilog and not status and ("-h" in argv or "--help" in argv):
print(self._epilog)
super().exit(status, message)
class OmittedLines:
"""Read lines textually and find comment lines that end in 'noqa {linter_name}'"""
omitted: set[int]
def __init__(self, lines: Sequence[str], linter_name: str) -> None:
self.lines = lines
suffix = f"# noqa: {linter_name}"
omitted = ((i, s.rstrip()) for i, s in enumerate(lines))
self.omitted = {i + 1 for i, s in omitted if s.endswith(suffix)}
def __call__(self, tokens: Sequence[TokenInfo]) -> bool:
# A token_line might span multiple physical lines
lines = sorted(i for t in tokens for i in (t.start[0], t.end[0]))
lines_covered = list(range(lines[0], lines[-1] + 1)) if lines else []
return bool(self.omitted.intersection(lines_covered))
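# Illustrative sketch (hypothetical linter name): for linter_name "FOO",
# OmittedLines(["x = 1  # noqa: FOO\n", "y = 2\n"], "FOO").omitted == {1},
# so __call__ returns True for any token line that touches physical line 1.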
class PythonFile:
contents: str
lines: list[str]
path: Path | None
linter_name: str
def __init__(
self,
linter_name: str,
path: Path | None = None,
contents: str | None = None,
) -> None:
self.linter_name = linter_name
self.path = path
if contents is None and path is not None:
contents = path.read_text()
self.contents = contents or ""
self.lines = self.contents.splitlines(keepends=True)
@classmethod
def make(cls, linter_name: str, pc: Path | str | None = None) -> PythonFile:
if isinstance(pc, Path):
return cls(linter_name, path=pc)
return cls(linter_name, contents=pc)
def with_contents(self, contents: str) -> PythonFile:
return PythonFile(self.linter_name, self.path, contents)
@cached_property
def omitted(self) -> OmittedLines:
assert self.linter_name is not None
return OmittedLines(self.lines, self.linter_name)
@cached_property
def tokens(self) -> list[TokenInfo]:
# Might raise IndentationError if the code is mal-indented
return list(generate_tokens(iter(self.lines).__next__))
@cached_property
def token_lines(self) -> list[list[TokenInfo]]:
"""Returns lists of TokenInfo segmented by token.NEWLINE"""
token_lines: list[list[TokenInfo]] = [[]]
for t in self.tokens:
if t.type not in (token.COMMENT, token.ENDMARKER, token.NL):
token_lines[-1].append(t)
if t.type == token.NEWLINE:
token_lines.append([])
if token_lines and not token_lines[-1]:
token_lines.pop()
return token_lines
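    # Illustrative sketch: for contents "x = 1\ny = 2\n", token_lines yields two
    # lists, one per logical line, each ending with its NEWLINE token; COMMENT,
    # NL and ENDMARKER tokens are dropped.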
@cached_property
def import_lines(self) -> list[list[int]]:
froms, imports = [], []
for i, (t, *_) in enumerate(self.token_lines):
if t.type == token.INDENT:
break
if t.type == token.NAME:
if t.string == "from":
froms.append(i)
elif t.string == "import":
imports.append(i)
return [froms, imports]
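    # Illustrative sketch: for "import os\nfrom sys import argv\n", import_lines
    # returns [[1], [0]]: token line 1 starts with "from" and token line 0 with
    # "import"; scanning stops at the first INDENT token (indented code).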
def bracket_pairs(tokens: Sequence[TokenInfo]) -> dict[int, int]:
"""Returns a dictionary mapping opening to closing brackets"""
braces: dict[int, int] = {}
stack: list[int] = []
for i, t in enumerate(tokens):
if t.type == token.OP:
if t.string in BRACKETS:
stack.append(i)
elif inv := BRACKETS_INV.get(t.string):
ParseError.check(stack, t, "Never opened")
begin = stack.pop()
braces[begin] = i
b = tokens[begin].string
ParseError.check(b == inv, t, f"Mismatched braces '{b}' at {begin}")
if tokens:
ParseError.check(not stack, t, "Left open")
return braces
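# Illustrative sketch: for the token stream of "f(x[1])" produced by
# generate_tokens (which emits no ENCODING token), bracket_pairs maps index 1
# ("(") to 6 (")") and index 3 ("[") to 5 ("]").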
class ErrorLines:
"""How many lines to display before and after an error"""
WINDOW = 5
BEFORE = 2
AFTER = WINDOW - BEFORE - 1
class FileLinter(ABC):
"""The base class that all token-based linters inherit from"""
description: str
linter_name: str
epilog: str | None = None
is_fixer: bool = True
report_column_numbers: bool = False
@abstractmethod
def _lint(self, python_file: PythonFile) -> Iterator[LintResult]:
raise NotImplementedError
def __init__(self, argv: list[str] | None = None) -> None:
self.argv = argv
self.parser = ArgumentParser(
is_fixer=self.is_fixer,
description=self.description,
epilog=self.epilog,
)
@classmethod
def run(cls) -> Never:
linter = cls()
success = linter.lint_all()
sys.exit(not success)
def lint_all(self) -> bool:
if self.args.fix and self.args.lintrunner:
raise ValueError("--fix and --lintrunner are incompatible")
success = True
for p in self.paths:
success = self._lint_file(p) and success
return self.args.lintrunner or success
@cached_property
def args(self) -> Namespace:
args = self.parser.parse_args(self.argv)
args.lintrunner = args.lintrunner or args.test
return args
@cached_property
def code(self) -> str:
return self.linter_name.upper()
@cached_property
def paths(self) -> list[Path]:
files = []
file_parts = (f for fp in self.args.files for f in fp.split(":"))
for f in file_parts:
if f.startswith("@"):
files.extend(Path(f[1:]).read_text().splitlines())
elif f != "--":
files.append(f)
return sorted(Path(f) for f in files)
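    # Illustrative sketch (hypothetical arguments): "--" entries are dropped,
    # "a.py:b.py" splits on ":" into two paths, "@files.txt" expands to the
    # lines of files.txt, and the result is returned as sorted Paths.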
def _lint_file(self, p: Path) -> bool:
if self.args.verbose:
print(p, "Reading")
pf = PythonFile(self.linter_name, p)
replacement, results = self._replace(pf)
print(*self._display(pf, results), sep="\n")
if results and self.args.fix and pf.path and pf.contents != replacement:
pf.path.write_text(replacement)
return not results or self.args.fix and all(r.is_edit for r in results)
def _replace(self, pf: PythonFile) -> tuple[str, list[LintResult]]:
# Because of recursive replacements, we need to repeat replacing and reparsing
# from the inside out until all possible replacements are complete
previous_result_count = float("inf")
first_results = None
original = replacement = pf.contents
while True:
try:
results = list(self._lint(pf))
except IndentationError as e:
error, (_name, lineno, column, _line) = e.args
results = [LintResult(error, lineno, column)]
if first_results is None:
first_results = sorted(results, key=LintResult.sort_key)
if not results or len(results) >= previous_result_count:
break
previous_result_count = len(results)
lines = pf.lines[:]
for r in reversed(results):
if not r.is_recursive:
r.apply(lines)
replacement = "".join(lines)
if not any(r.is_recursive for r in results):
break
pf = pf.with_contents(replacement)
if first_results and self.args.lintrunner:
name = f"Suggested fixes for {self.linter_name}"
msg = LintResult(name=name, original=original, replacement=replacement)
first_results.append(msg)
return replacement, first_results
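    # Illustrative sketch, reusing the docstring example from LintResult: for
    # "Union[Optional[str], int]" the outer result is recursive, so only the
    # inner "Optional[str]" edit is applied, the file is re-tokenized via
    # with_contents, and the loop repeats until no recursive results remain or
    # the result count stops shrinking.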
def _display(self, pf: PythonFile, results: list[LintResult]) -> Iterator[str]:
"""Emit a series of human-readable strings representing the results"""
show_edits = not self.args.fix or self.args.verbose
first = True
for r in results:
if show_edits or r.is_edit:
if self.args.test or self.args.lintrunner:
msg = r.as_message(code=self.code, path=str(pf.path))
yield json.dumps(msg.asdict(), sort_keys=True)
continue
if first:
first = False
else:
yield ""
if r.line is None:
yield f"{pf.path}: {r.name}"
else:
yield from (i.rstrip() for i in self._display_window(pf, r))
def _display_window(self, pf: PythonFile, r: LintResult) -> Iterator[str]:
"""Display a window onto the code with an error"""
if r.char is None or not self.report_column_numbers:
yield f"{pf.path}:{r.line}: {r.name}"
else:
yield f"{pf.path}:{r.line}:{r.char + 1}: {r.name}"
begin = max((r.line or 0) - ErrorLines.BEFORE, 1)
end = min(begin + ErrorLines.WINDOW, 1 + len(pf.lines))
for lineno in range(begin, end):
source_line = pf.lines[lineno - 1].rstrip()
yield f"{lineno:5} | {source_line}"
if lineno == r.line:
spaces = 8 + (r.char or 0)
carets = len(source_line) if r.char is None else (r.length or 1)
yield spaces * " " + carets * "^"
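    # Illustrative sketch: for a result with line=3, char=2, length=3 in a file
    # of at least five lines, the window shows lines 1..5 and, under line 3, a
    # caret line of 10 spaces followed by "^^^"; the 8 in `spaces` accounts for
    # the "{lineno:5} | " prefix.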
def set_logging_level(args: argparse.Namespace, paths: Sequence[Path | str]) -> None:
if args.verbose:
level = logging.NOTSET
elif len(paths) < 1000:
level = logging.DEBUG
else:
level = logging.INFO
fmt = "<%(threadName)s:%(levelname)s> %(message)s"
logging.basicConfig(format=fmt, level=level, stream=sys.stderr)