File: test_targeting.py

# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.

import pytest

from hypothesis import Phase, given, seed, settings, strategies as st, target

from tests.common.utils import Why, xfail_on_crosshair

pytest_plugins = "pytester"

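# Inner test suite run via the pytester fixture. The {0} placeholder is filled
# with either "" or "# ", enabling or commenting out the second, labelled
# target() call and the looser bound.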
TESTSUITE = """
from hypothesis import given, strategies as st, target

@given(st.integers(min_value=0))
def test_threshold_problem(x):
    target(float(x))
    {0}target(float(x * 2), label="double")
    {0}assert x <= 100000
    assert x <= 100
"""


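# The failure report for the inner test should include the falsifying example,
# the shrunk bound (x=101), and exactly one "Highest target score" line.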
@pytest.mark.parametrize("multiple", [False, True])
def test_reports_target_results(testdir, multiple):
    script = testdir.makepyfile(TESTSUITE.format("" if multiple else "# "))
    result = testdir.runpytest(script, "--tb=native", "-rN")
    out = "\n".join(result.stdout.lines)
    assert "Falsifying example" in out
    assert "x=101" in out, out
    assert out.count("Highest target score") == 1
    assert result.ret != 0


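# Targeting len(ls) during the target phase should push list generation well
# past the 80-element bound, so the inner test is expected to fail.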
def test_targeting_increases_max_length():
    strat = st.lists(st.booleans())

    @settings(database=None, max_examples=200, phases=[Phase.generate, Phase.target])
    @given(strat)
    def test_with_targeting(ls):
        target(float(len(ls)))
        assert len(ls) <= 80

    with pytest.raises(AssertionError):
        test_with_targeting()


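# target() hands back the observed value unchanged, keeping its original type.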
@given(st.integers(), st.integers())
def test_target_returns_value(a, b):
    difference = target(abs(a - b))
    assert difference == abs(a - b)
    assert isinstance(difference, int)


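# With the same seed and example budget, the best score reached with
# Phase.target enabled should exceed the best score with targeting disabled.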
@xfail_on_crosshair(Why.symbolic_outside_context)
def test_targeting_can_be_disabled():
    strat = st.lists(st.integers(0, 255))

    def score(enabled):
        result = 0
        phases = [Phase.generate]
        if enabled:
            phases.append(Phase.target)

        @seed(0)
        @settings(database=None, max_examples=100, phases=phases)
        @given(strat)
        def test(ls):
            nonlocal result
            # cap the score to avoid long test times by unbounded driving of list
            # length upwards
            score = min(sum(ls), 10_000)
            result = max(result, score)
            target(score)

        test()
        return result

    assert score(enabled=True) > score(enabled=False)


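# Regression test for issue 2395: targeting a square-loss score over filtered
# floats should run to completion without raising.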
@pytest.mark.skipif(
    settings._current_profile == "crosshair",
    reason="takes ~15 minutes, mostly just unrolling the rejection sampling loop",
)
def test_issue_2395_regression():
    @given(d=st.floats().filter(lambda x: abs(x) < 1000))
    @settings(max_examples=1000, database=None)
    @seed(93962505385993024185959759429298090872)
    def test_targeting_square_loss(d):
        target(-((d - 42.5) ** 2.0))

    test_targeting_square_loss()