"""Tests for the hypothesmith.syntactic module."""
import ast
import io
import sys
import tokenize
import black
import blib2to3
import parso
import pytest
from hypothesis import example, given, reject, strategies as st
import hypothesmith
def fixup(s):
    """Avoid known issues with tokenize() by editing the string."""
    # Keep only printable characters, then trim whitespace and any
    # trailing/leading backslashes before re-trimming whitespace.
    visible = [char for char in s if char.isprintable()]
    trimmed = "".join(visible).strip().strip("\\").strip()
    return trimmed + "\n"
@pytest.mark.xfail
@example("#")
@example("\n\\\n")
@example("#\n\x0cpass#\n")
@given(source_code=hypothesmith.from_grammar().map(fixup).filter(str.strip))
def test_tokenize_round_trip_bytes(source_code):
    """Round-trip generated source through tokenize/untokenize at the bytes level."""
    try:
        encoded = source_code.encode("utf-8-sig")
    except UnicodeEncodeError:
        reject()
    first_pass = list(tokenize.tokenize(io.BytesIO(encoded).readline))
    rebuilt = tokenize.untokenize(first_pass)  # may have changed whitespace from source
    second_pass = list(tokenize.tokenize(io.BytesIO(rebuilt).readline))
    # Compare (type, string) pairs rather than raw bytes, since whitespace
    # is allowed to differ after untokenize.
    assert [(tok.type, tok.string) for tok in first_pass] == [
        (tok.type, tok.string) for tok in second_pass
    ]
    # It would be nice if the round-tripped string stabilised. It doesn't.
    # assert rebuilt == tokenize.untokenize(second_pass)
@pytest.mark.xfail
@example("#")
@example("\n\\\n")
@example("#\n\x0cpass#\n")
@given(source_code=hypothesmith.from_grammar().map(fixup).filter(str.strip))
def test_tokenize_round_trip_string(source_code):
    """Round-trip generated source through generate_tokens/untokenize at the str level."""
    tokens = list(tokenize.generate_tokens(io.StringIO(source_code).readline))
    outstring = tokenize.untokenize(tokens)  # may have changed whitespace from source
    # Materialise the second token stream as a list (matching the bytes
    # round-trip test above) so it can be inspected on failure and is not
    # a one-shot generator.
    output = list(tokenize.generate_tokens(io.StringIO(outstring).readline))
    assert [(t.type, t.string) for t in tokens] == [(t.type, t.string) for t in output]
    # It would be nice if the round-tripped string stabilised. It doesn't.
    # assert outstring == tokenize.untokenize(output)
@pytest.mark.skipif(not hasattr(ast, "unparse"), reason="Can't test before available")
@given(source_code=hypothesmith.from_grammar())
def test_ast_unparse_from_grammar(source_code):
    """ast.unparse output must parse back to an equivalent tree."""
    parsed = ast.parse(source_code)
    round_tripped = ast.parse(ast.unparse(parsed))
    assert ast.dump(parsed) == ast.dump(round_tripped)
@example("\\", black.Mode())
@example("A#\r#", black.Mode())
@given(
    source_code=hypothesmith.from_grammar(),
    mode=st.builds(
        black.Mode,
        line_length=st.just(88) | st.integers(0, 200),
        string_normalization=st.booleans(),
        is_pyi=st.booleans(),
    ),
)
def test_black_autoformatter_from_grammar(source_code, mode):
    """Black should format generated source, and be idempotent on its own output."""
    try:
        formatted = black.format_file_contents(source_code, fast=False, mode=mode)
    except black.NothingChanged:
        return
    except (blib2to3.pgen2.tokenize.TokenError, black.InvalidInput):
        # TokenError: fails to tokenise e.g. "\\", though
        # compile("\\", "<string>", "exec") works.
        # See https://github.com/psf/black/issues/1012
        # InvalidInput: e.g. "A#\r#", see https://github.com/psf/black/issues/970
        reject()
    else:
        # Formatting a second time must be a no-op.
        with pytest.raises(black.NothingChanged):
            black.format_file_contents(formatted, fast=False, mode=mode)
@given(source_code=hypothesmith.from_grammar("eval_input"))
def test_eval_input_generation(source_code):
    """Snippets generated from the eval_input grammar must compile in 'eval' mode."""
    compile(source_code, "<string>", "eval")
@given(source_code=hypothesmith.from_grammar(auto_target=False))
def test_generation_without_targeting(source_code):
    """Generation with auto_target disabled still yields compilable source."""
    compile(source_code, "<string>", "exec")
@pytest.mark.xfail(sys.version_info >= (3, 13), reason="parso does not support 3.13")
@given(source_code=hypothesmith.from_grammar())
def test_parso_from_grammar(source_code):
    """parso must parse generated source and reproduce it exactly via get_code()."""
    regenerated = parso.parse(source_code).get_code()
    assert regenerated == source_code