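"""
Tests for ``dom_toml.dumps`` and the ``TomlEncoder`` class.
"""
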
# stdlib
from typing import Any, Dict

# 3rd party
import pytest
from coincidence import AdvancedFileRegressionFixture

# this package
import dom_toml
from dom_toml import TomlEncoder, dumps
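
# Sample ``[project]`` table (PEP 621 project metadata) used for the round-trip test below.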
PEP621 = {
    "name": "greppy",
    "version": "0.0.0",
    "description": "Recursively grep over Python files in the files in the given directory 🔎",
    "readme": "README.rst",
    "keywords": [],
    "authors": [{"email": "dominic@davis-foster.co.uk", "name": "Dominic Davis-Foster"}],
    "dynamic": ["requires-python", "classifiers", "dependencies"],
    "license": {"file": "LICENSE"},
}
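
# A list of tables, expected to be encoded as a TOML array of tables.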
array_of_tables = {"key": [
    {"dict1": "dict1_value"},
    {"dict2": "dict2_value"},
    {"dict3": "dict3_value"},
]}


@pytest.mark.parametrize(
    "data",
    [
        pytest.param({"dotted.key": "string"}, id="dotted.key"),
        pytest.param({"key": "☃🚀📦"}, id="unicode"),
        pytest.param({"key": "string"}, id="string_value"),
        pytest.param({"key": ["list", 'double ""', "single ''"]}, id="list_value"),
        pytest.param(
            {
                "key": [
                    "insure",
                    "auspicious",
                    "neglect",
                    "craven",
                    "match",
                    "worship",
                    "wave",
                    "languid",
                    "bad",
                    "news",
                    "flashy",
                    "recall",
                    "mother",
                    "festive",
                    "cup",
                    'double ""',
                    "single ''",
                    "mixed '\"",
                    "newline\n",
                    "formfeed\f",
                    "carriage_return\r",
                    "backslash\\",
                    "backspace\b",
                    "tab\t",
                ]
            },
            id="long_list",
        ),
        pytest.param({"key": {"dict": "dict_value"}}, id="dict_value"),
        pytest.param(array_of_tables, id="array_of_tables"),
        pytest.param({"section": {"key": "string"}}, id="section_string_value"),
        pytest.param({"section": {"key": ["list"]}}, id="section_list_value"),
        pytest.param({"project": PEP621}, id="pep621"),
    ]
)
def test_encoder(data: Dict[str, Any], advanced_file_regression: AdvancedFileRegressionFixture):
    """Check the TOML produced by ``dumps`` against the regression file, then round-trip it with ``loads``."""
    as_toml = dumps(data, encoder=TomlEncoder())
    advanced_file_regression.check(as_toml, extension=".toml")
    assert dom_toml.loads(as_toml) == data


@pytest.mark.parametrize(
    "data",
    [
        pytest.param({"key": ("list", )}, id="tuple_value"),
        pytest.param({"section": {"key": ("list", )}}, id="section_tuple_value"),
    ]
)
def test_encoder_tuples(data: Dict[str, Any], advanced_file_regression: AdvancedFileRegressionFixture):
    """Check that tuples are encoded like lists (no round-trip equality check, as TOML arrays load back as lists)."""
    as_toml = dumps(data, encoder=TomlEncoder())
    advanced_file_regression.check(as_toml, extension=".toml")


def test_encoder_inline_table(advanced_file_regression: AdvancedFileRegressionFixture):
    """Check round-tripping of an inline table with ``TomlEncoder(preserve=True)``."""
    source = "[project]\nreadme = { file = 'README.rst', content-type = 'text/x-rst' }\n"
    parsed = dom_toml.loads(source)
    result = dom_toml.dumps(parsed, encoder=TomlEncoder(preserve=True))

    # Normalise quote style; accept either the preserved inline table or the expanded sub-table form.
    result_normalized = result.replace('"', "'")
    expected_inline = "[project]\nreadme = { file = 'README.rst', content-type = 'text/x-rst' }\n"
    expected_expanded = "[project.readme]\nfile = 'README.rst'\ncontent-type = 'text/x-rst'\n"
    assert result_normalized in (expected_inline, expected_expanded), f"Unexpected result:\n{result}"


def test_encoder_inline_table_nested(advanced_file_regression: AdvancedFileRegressionFixture):
    """Check round-tripping of a nested inline table with ``TomlEncoder(preserve=True)``."""
    source = "[project]\nreadme = { file = 'README.rst', nested = { content-type = 'text/x-rst' } }\n"
    parsed = dom_toml.loads(source)
    result = dom_toml.dumps(parsed, encoder=TomlEncoder(preserve=True))

    # Normalise quote style; accept either the preserved inline table or the expanded sub-tables.
    result_normalized = result.replace('"', "'")
    expected_inline = "[project]\nreadme = { file = 'README.rst', nested = { content-type = 'text/x-rst' } }\n"
    expected_expanded = (
        "[project.readme]\n"
        "file = 'README.rst'\n"
        "\n"
        "[project.readme.nested]\n"
        "content-type = 'text/x-rst'\n"
    )
    assert result_normalized in (expected_inline, expected_expanded), f"Unexpected result:\n{result}"