File: pytest_examples_not_yet_packaged.patch

package info (click to toggle)
pydantic 2.12.4-1
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid
  • size: 7,648 kB
  • sloc: python: 75,973; javascript: 181; makefile: 115; sh: 38
file content (304 lines) | stat: -rw-r--r-- 11,165 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
From: Alexandre Detiste <tchet@debian.org>
Date: Sun, 1 Sep 2024 19:42:50 +0100
Subject: disable tests that are not runnable (pytest-examples is not yet packaged)

Forwarded: no please
---
 tests/test_docs.py | 288 -----------------------------------------------------
 1 file changed, 288 deletions(-)
 delete mode 100644 tests/test_docs.py

diff --git a/tests/test_docs.py b/tests/test_docs.py
deleted file mode 100644
index dabd370..0000000
--- a/tests/test_docs.py
+++ /dev/null
@@ -1,288 +0,0 @@
-from __future__ import annotations as _annotations
-
-import os
-import platform
-import re
-import subprocess
-import sys
-from datetime import datetime
-from pathlib import Path
-from tempfile import NamedTemporaryFile
-from typing import Any
-
-import pytest
-from pydantic_core import core_schema
-from pytest_examples import CodeExample, EvalExample, find_examples
-
-from pydantic.errors import PydanticErrorCodes
-
-INDEX_MAIN = None
-DOCS_ROOT = Path(__file__).parent.parent / 'docs'
-SOURCES_ROOT = Path(__file__).parent.parent / 'pydantic'
-
-
-def skip_docs_tests():
-    if sys.platform not in {'linux', 'darwin'}:
-        return 'not in linux or macos'
-
-    if platform.python_implementation() != 'CPython':
-        return 'not cpython'
-
-    try:
-        import devtools  # noqa: F401
-    except ImportError:
-        return 'devtools not installed'
-
-    try:
-        import sqlalchemy  # noqa: F401
-    except ImportError:
-        return 'sqlalchemy not installed'
-
-    try:
-        import ansi2html  # noqa: F401
-    except ImportError:
-        return 'ansi2html not installed'
-
-
-class GroupModuleGlobals:
-    def __init__(self) -> None:
-        self.name = None
-        self.module_dict: dict[str, str] = {}
-
-    def get(self, name: str | None):
-        if name is not None and name == self.name:
-            return self.module_dict
-
-    def set(self, name: str | None, module_dict: dict[str, str]):
-        self.name = name
-        if self.name is None:
-            self.module_dict = None
-        else:
-            self.module_dict = module_dict
-
-
-group_globals = GroupModuleGlobals()
-
-
-class MockedDatetime(datetime):
-    @classmethod
-    def now(cls, *args, tz=None, **kwargs):
-        return datetime(2032, 1, 2, 3, 4, 5, 6, tzinfo=tz)
-
-
-skip_reason = skip_docs_tests()
-LINE_LENGTH = 80
-TARGET_VERSION = 'py39'
-
-
-def print_callback(print_statement: str) -> str:
-    return re.sub(r'(https://errors.pydantic.dev)/.+?/', r'\1/2/', print_statement)
-
-
-def run_example(example: CodeExample, eval_example: EvalExample, mocker: Any) -> None:  # noqa C901
-    eval_example.print_callback = print_callback
-
-    prefix_settings = example.prefix_settings()
-    test_settings = prefix_settings.get('test', '')
-    lint_settings = prefix_settings.get('lint', '')
-    if test_settings.startswith('skip') and lint_settings.startswith('skip'):
-        pytest.skip('both running code and lint skipped')
-
-    requires_settings = prefix_settings.get('requires')
-    if requires_settings:
-        major, minor = map(int, requires_settings.split('.'))
-        if sys.version_info < (major, minor):
-            pytest.skip(f'requires python {requires_settings}')
-
-    group_name = prefix_settings.get('group')
-
-    eval_example.set_config(
-        ruff_ignore=['D', 'T', 'B', 'C4', 'E721', 'Q001', 'PERF', 'PIE790'],
-        line_length=LINE_LENGTH,
-        target_version=TARGET_VERSION,
-    )
-    if '# ignore-above' in example.source:
-        eval_example.set_config(
-            ruff_ignore=eval_example.config.ruff_ignore + ['E402'],
-            line_length=LINE_LENGTH,
-            target_version=TARGET_VERSION,
-        )
-    if group_name:
-        eval_example.set_config(
-            ruff_ignore=eval_example.config.ruff_ignore + ['F821'],
-            line_length=LINE_LENGTH,
-            target_version=TARGET_VERSION,
-        )
-
-    if not lint_settings.startswith('skip'):
-        if eval_example.update_examples:
-            eval_example.format(example)
-        else:
-            if example.in_py_file():
-                # Ignore isort as double newlines will cause it to fail, but we remove them in py files
-                eval_example.set_config(
-                    ruff_ignore=eval_example.config.ruff_ignore + ['I001'],
-                    line_length=LINE_LENGTH,
-                    target_version=TARGET_VERSION,
-                )
-            eval_example.lint(example)
-
-    if test_settings.startswith('skip'):
-        pytest.skip(test_settings[4:].lstrip(' -') or 'running code skipped')
-
-    group_name = prefix_settings.get('group')
-    d = group_globals.get(group_name)
-
-    mocker.patch('datetime.datetime', MockedDatetime)
-    mocker.patch('random.randint', return_value=3)
-
-    xfail = None
-    if test_settings.startswith('xfail'):
-        xfail = test_settings[5:].lstrip(' -')
-
-    rewrite_assertions = prefix_settings.get('rewrite_assert', 'true') == 'true'
-
-    try:
-        if test_settings == 'no-print-intercept':
-            d2 = eval_example.run(example, module_globals=d, rewrite_assertions=rewrite_assertions)
-        elif eval_example.update_examples:
-            d2 = eval_example.run_print_update(example, module_globals=d, rewrite_assertions=rewrite_assertions)
-        else:
-            d2 = eval_example.run_print_check(example, module_globals=d, rewrite_assertions=rewrite_assertions)
-    except BaseException as e:  # run_print_check raises a BaseException
-        if xfail:
-            pytest.xfail(f'{xfail}, {type(e).__name__}: {e}')
-        raise
-    else:
-        if xfail:
-            pytest.fail('expected xfail')
-        group_globals.set(group_name, d2)
-
-
-@pytest.mark.thread_unsafe
-@pytest.mark.filterwarnings('ignore:(parse_obj_as|schema_json_of|schema_of) is deprecated.*:DeprecationWarning')
-@pytest.mark.skipif(bool(skip_reason), reason=skip_reason or 'not skipping')
-@pytest.mark.parametrize('example', find_examples(str(SOURCES_ROOT), skip=sys.platform == 'win32'), ids=str)
-def test_docstrings_examples(example: CodeExample, eval_example: EvalExample, tmp_path: Path, mocker):
-    if str(example.path).startswith(str(SOURCES_ROOT / 'v1')):
-        pytest.skip('skip v1 examples')
-
-    run_example(example, eval_example, mocker)
-
-
-@pytest.fixture(scope='module', autouse=True)
-def set_cwd():
-    # `test_docs_examples` needs to be run from this folder or relative paths will be wrong and some tests fail
-    execution_path = str(DOCS_ROOT.parent)
-
-    cwd = os.getcwd()
-    os.chdir(execution_path)
-    try:
-        yield
-    finally:
-        os.chdir(cwd)
-
-
-@pytest.mark.thread_unsafe
-@pytest.mark.filterwarnings('ignore:(parse_obj_as|schema_json_of|schema_of) is deprecated.*:DeprecationWarning')
-@pytest.mark.skipif(bool(skip_reason), reason=skip_reason or 'not skipping')
-@pytest.mark.parametrize('example', find_examples(str(DOCS_ROOT), skip=sys.platform == 'win32'), ids=str)
-def test_docs_examples(example: CodeExample, eval_example: EvalExample, tmp_path: Path, mocker):
-    global INDEX_MAIN
-    if example.path.name == 'index.md':
-        if INDEX_MAIN is None:
-            INDEX_MAIN = example.source
-        else:
-            (tmp_path / 'index_main.py').write_text(INDEX_MAIN)
-            sys.path.append(str(tmp_path))
-
-    if example.path.name == 'devtools.md':
-        pytest.skip('tested below')
-
-    run_example(example, eval_example, mocker)
-
-
-@pytest.mark.thread_unsafe
-@pytest.mark.skipif(bool(skip_reason), reason=skip_reason or 'not skipping')
-@pytest.mark.skipif(sys.version_info >= (3, 13), reason='python-devtools does not yet support python 3.13')
-@pytest.mark.parametrize(
-    'example', find_examples(str(DOCS_ROOT / 'integrations/devtools.md'), skip=sys.platform == 'win32'), ids=str
-)
-def test_docs_devtools_example(example: CodeExample, eval_example: EvalExample, tmp_path: Path):
-    from ansi2html import Ansi2HTMLConverter
-
-    eval_example.set_config(ruff_ignore=['D', 'T', 'B', 'C4'], line_length=LINE_LENGTH, target_version=TARGET_VERSION)
-
-    if eval_example.update_examples:
-        eval_example.format(example)
-    else:
-        eval_example.lint(example)
-
-    with NamedTemporaryFile(mode='w', suffix='.py') as f:
-        f.write(example.source)
-        f.flush()
-        os.environ['PY_DEVTOOLS_HIGHLIGHT'] = 'true'
-        p = subprocess.run((sys.executable, f.name), stdout=subprocess.PIPE, check=True, encoding='utf8')
-
-    conv = Ansi2HTMLConverter()
-
-    # replace ugly file path with "devtools_example.py"
-    output = re.sub(r'/.+?\.py', 'devtools_example.py', p.stdout)
-    output_html = conv.convert(output, full=False)
-    output_html = (
-        '<!-- DO NOT EDIT MANUALLY: '
-        'Generated by tests/test_docs.py::test_docs_devtools_example for use in docs -->\n'
-        f'{output_html}'
-    )
-    output_file = DOCS_ROOT / 'plugins/devtools_output.html'
-
-    if eval_example.update_examples:
-        output_file.write_text(output_html)
-    elif not output_file.exists():
-        pytest.fail(f'output file {output_file} does not exist')
-    else:
-        assert output_html == output_file.read_text()
-
-
-def test_error_codes():
-    error_text = (DOCS_ROOT / 'errors/usage_errors.md').read_text()
-
-    code_error_codes = PydanticErrorCodes.__args__
-
-    documented_error_codes = tuple(re.findall(r'^## .+ \{#(.+?)}$', error_text, flags=re.MULTILINE))
-
-    assert code_error_codes == documented_error_codes, 'Error codes in code and docs do not match'
-
-
-def test_validation_error_codes():
-    error_text = (DOCS_ROOT / 'errors/validation_errors.md').read_text()
-
-    expected_validation_error_codes = set(core_schema.ErrorType.__args__)
-    # Remove codes that are not currently accessible from pydantic:
-    expected_validation_error_codes.remove('timezone_offset')  # not currently exposed for configuration in pydantic
-
-    test_failures = []
-
-    documented_validation_error_codes = []
-    error_code_section = None
-    printed_error_code = None
-    for line in error_text.splitlines():
-        section_match = re.fullmatch(r'## `(.+)`', line)
-        if section_match:
-            if error_code_section is not None and printed_error_code != error_code_section:
-                test_failures.append(f'Error code {error_code_section!r} is not printed in its example')
-            error_code_section = section_match.group(1)
-            if error_code_section not in expected_validation_error_codes:
-                test_failures.append(f'Documented error code {error_code_section!r} is not a member of ErrorType')
-            documented_validation_error_codes.append(error_code_section)
-            printed_error_code = None
-            continue
-
-        printed_match = re.search("#> '(.+)'", line)
-        if printed_match:
-            printed_error_code = printed_match.group(1)
-
-    assert test_failures == []
-
-    code_validation_error_codes = sorted(expected_validation_error_codes)
-    assert code_validation_error_codes == documented_validation_error_codes, 'Error codes in code and docs do not match'