File: test_conjecture_engine.py

Package: python-hypothesis 6.138.0-1

# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.

from hypothesis import given, settings, strategies as st
from hypothesis.database import InMemoryExampleDatabase, choices_from_bytes
from hypothesis.internal.conjecture.data import ConjectureData
from hypothesis.internal.conjecture.engine import ConjectureRunner
from hypothesis.internal.conjecture.shrinker import Shrinker

from tests.common.utils import (
    Why,
    counts_calls,
    non_covering_examples,
    xfail_on_crosshair,
)
from tests.conjecture.common import interesting_origin, run_to_nodes, shrinking_from
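
# These tests exercise the ConjectureRunner end to end: exploring past dead
# (invalid) branches, persisting examples to the database while shrinking,
# discarding redundant draws, caching of test results, and bounding the cost
# of node-program shrink passes that never succeed.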


@xfail_on_crosshair(Why.nested_given)
def test_lot_of_dead_nodes():
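    # Only the choice sequence 0, 1, 2, 3 reaches mark_interesting; every
    # other draw hits mark_invalid, so the runner must search past a large
    # number of dead branches to find it.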
    @run_to_nodes
    def nodes(data):
        for i in range(4):
            if data.draw_integer(0, 2**8 - 1) != i:
                data.mark_invalid()
        data.mark_interesting(interesting_origin())

    assert tuple(n.value for n in nodes) == (0, 1, 2, 3)


def test_saves_data_while_shrinking(monkeypatch):
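    # generate_new_examples is patched below so the runner starts from a
    # maximal (all-255) buffer and immediately begins shrinking.  `seen`
    # records up to n distinct large values encountered along the way, and
    # the final assertions check that exactly those values were saved to
    # the database.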
    key = b"hi there"
    n = 5
    db = InMemoryExampleDatabase()
    assert list(db.fetch(key)) == []
    seen = set()

    monkeypatch.setattr(
        ConjectureRunner,
        "generate_new_examples",
        lambda runner: runner.cached_test_function([bytes([255] * 10)]),
    )

    def f(data):
        x = data.draw_bytes(10, 10)
        if sum(x) >= 2000 and len(seen) < n:
            seen.add(x)
        if x in seen:
            data.mark_interesting(interesting_origin())

    runner = ConjectureRunner(f, settings=settings(database=db), database_key=key)
    runner.run()
    assert runner.interesting_examples
    assert len(seen) == n

    in_db = {choices_from_bytes(b)[0] for b in non_covering_examples(db)}
    assert in_db.issubset(seen)
    assert in_db == seen


def test_can_discard(monkeypatch):
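    # The patched seed choice sequence below contains every value twice; the
    # engine should discard the redundant duplicate draws, leaving exactly n
    # nodes in the final result.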
    n = 8

    monkeypatch.setattr(
        ConjectureRunner,
        "generate_new_examples",
        lambda runner: runner.cached_test_function(
            tuple(bytes(v) for i in range(n) for v in [i, i])
        ),
    )

    @run_to_nodes
    def nodes(data):
        seen = set()
        while len(seen) < n:
            seen.add(data.draw_bytes())
        data.mark_interesting(interesting_origin())

    assert len(nodes) == n


@xfail_on_crosshair(Why.nested_given)
@given(st.integers(0, 255), st.integers(0, 255))
def test_cached_with_masked_byte_agrees_with_results(a, b):
    def f(data):
        data.draw_integer(0, 3)

    runner = ConjectureRunner(f)

    cached_a = runner.cached_test_function([a])
    cached_b = runner.cached_test_function([b])

    data_b = ConjectureData.for_choices([b], observer=runner.tree.new_observer())
    runner.test_function(data_b)

    # If the cache found an old result, then it should match the real result.
    # If it did not, then it must be because A and B were different.
    assert (cached_a is cached_b) == (cached_a.nodes == data_b.nodes)


def test_node_programs_fail_efficiently(monkeypatch):
    # Create 256 byte-sized nodes. None of the nodes can be deleted, and
    # every deletion attempt produces a different buffer.
    @shrinking_from(range(256))
    def shrinker(data: ConjectureData):
        values = set()
        for _ in range(256):
            v = data.draw_integer(0, 2**8 - 1)
            values.add(v)
        if len(values) == 256:
            data.mark_interesting(interesting_origin())

    monkeypatch.setattr(
        Shrinker, "run_node_program", counts_calls(Shrinker.run_node_program)
    )
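    # The "XX" node program attempts to delete two adjacent nodes at a time;
    # since no deletion can ever succeed here, the assertions below bound how
    # many calls a completely failing pass is allowed to make.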
    shrinker.max_stall = 500
    shrinker.fixate_shrink_passes([shrinker.node_program("XX")])

    assert shrinker.shrinks == 0
    assert 250 <= shrinker.calls <= 260
    # The node program should have been run roughly 255 times, with a little
    # bit of wiggle room for implementation details.
    #   - Too many calls mean that failing steps are doing too much work.
    #   - Too few calls mean that this test is probably miscounting and buggy.
    assert 250 <= Shrinker.run_node_program.calls <= 260