# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
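
# These tests exercise the experimental crosshair backend
# (settings(backend="crosshair")), provided by the hypothesis-crosshair
# package. Broadly, they check two invariants: that hypothesis does not
# realize symbolic values earlier than necessary, and that features like
# observability and verbosity do not add path constraints to crosshair's
# statespace.
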
import crosshair
import pytest
from crosshair.core import IgnoreAttempt, NotDeterministic, UnexploredPath
from hypothesis_crosshair_provider.crosshair_provider import CrossHairPrimitiveProvider
from hypothesis import Phase, Verbosity, event, given, settings, strategies as st
from hypothesis.database import InMemoryExampleDatabase
from hypothesis.internal.conjecture.provider_conformance import run_conformance_test
from hypothesis.internal.conjecture.providers import COLLECTION_DEFAULT_MAX_SIZE
from hypothesis.internal.intervalsets import IntervalSet
from hypothesis.internal.observability import with_observability_callback
from hypothesis.vendor.pretty import pretty
from tests.common.utils import capture_observations
from tests.conjecture.common import float_constr, integer_constr, string_constr


@pytest.mark.parametrize("verbosity", list(Verbosity))
def test_crosshair_works_for_all_verbosities(verbosity):
    # check that we aren't realizing symbolics early in debug prints and killing
    # test effectiveness.
    @given(st.integers())
    @settings(backend="crosshair", verbosity=verbosity, database=None)
    def f(n):
        assert n != 123456

    with pytest.raises(AssertionError):
        f()


@pytest.mark.parametrize("verbosity", list(Verbosity))
def test_crosshair_works_for_all_verbosities_data(verbosity):
    # data draws have their own print path
    @given(st.data())
    @settings(backend="crosshair", verbosity=verbosity, database=None)
    def f(data):
        n = data.draw(st.integers())
        assert n != 123456

    with pytest.raises(AssertionError):
        f()


def test_hypothesis_realizes_on_fatal_error():
    # BaseExceptions and internal hypothesis failures take a different database
    # save path. Make sure we realize symbolic values on that path. This test
    # is a bit of a no-op, because we're really relying on our realization
    # validation to pass here.
    db = InMemoryExampleDatabase()

    @given(st.integers())
    @settings(database=db, backend="crosshair")
    def f(n):
        raise BaseException("marker")

    with pytest.raises(BaseException, match="marker"):
        f()


def count_choices_for(choice_type, constraints):
    # returns the number of choices that crosshair makes for this draw, before
    # hypothesis ever has a chance to interact with it.
    provider = CrossHairPrimitiveProvider()
    with provider.per_test_case_context_manager():
        assert len(crosshair.statespace.context_statespace().choices_made) == 0
        getattr(provider, f"draw_{choice_type}")(**constraints)
        return len(crosshair.statespace.context_statespace().choices_made)
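
# count_choices_for gives us a per-draw baseline, measured on a bare provider.
# The parametrized test below compares the statespace's choice count inside a
# real @given test against this baseline, so any extra choice must have been
# added by hypothesis interacting with the symbolic value.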


@pytest.mark.parametrize(
    "strategy, expected_choices",
    [
        (st.integers(), lambda: count_choices_for("integer", integer_constr())),
        (st.floats(), lambda: count_choices_for("float", float_constr())),
        (
            st.binary(),
            lambda: count_choices_for(
                "bytes", {"min_size": 0, "max_size": COLLECTION_DEFAULT_MAX_SIZE}
            ),
        ),
        (st.booleans(), lambda: count_choices_for("boolean", {})),
        (
            st.text(),
            lambda: count_choices_for(
                "string", string_constr(IntervalSet.from_string("a"))
            ),
        ),
    ],
    ids=pretty,
)
def test_no_path_constraints_are_added_to_symbolic_values(strategy, expected_choices):
    # check that we don't interact with returned symbolics from the crosshair
    # provider in a way that would add decisions to crosshair's state space (ie
    # add path constraints).
    expected_choices = expected_choices()

    # skip the first call, which is ChoiceTemplate(type="simplest") with the
    # hypothesis backend.
    called = False

    # limit to a single real example (max_examples=2 counts the skipped
    # ChoiceTemplate call) to prevent crosshair from raising e.g.
    # BackendCannotProceed(scope="verified") and switching to the hypothesis
    # provider.
    @given(strategy)
    @settings(
        backend="crosshair", database=None, phases={Phase.generate}, max_examples=2
    )
    def f(value):
        nonlocal called
        if not called:
            called = True
            return

        # if this test ever fails, we will replay it without crosshair, in which
        # case the statespace is None.
        statespace = crosshair.statespace.optional_context_statespace()
        assert statespace is not None, "this test failed under crosshair"
        assert len(statespace.choices_made) == expected_choices

    f()


@pytest.mark.parametrize(
    "strategy, extra_observability",
    [
        # we add an additional path constraint to ints in to_jsonable.
        (st.integers(), 1),
        (st.text(), 0),
        (st.booleans(), 0),
        (st.floats(), 0),
        (st.binary(), 0),
    ],
)
def test_observability_and_verbosity_dont_add_choices(strategy, extra_observability):
    choices = {}

    # skip the first call, which is ChoiceTemplate(type="simplest") with the
    # hypothesis backend.
    called = False

    @given(strategy)
    @settings(backend="crosshair", database=None, max_examples=2)
    def f_normal(value):
        nonlocal called
        if called:
            choices["normal"] = len(
                crosshair.statespace.context_statespace().choices_made
            )
        called = True

    f_normal()
    called = False

    @given(strategy)
    @settings(
        backend="crosshair", database=None, max_examples=2, verbosity=Verbosity.debug
    )
    def f_verbosity(value):
        nonlocal called
        if called:
            choices["verbosity"] = len(
                crosshair.statespace.context_statespace().choices_made
            )
        called = True

    f_verbosity()
    called = False

    @given(strategy)
    @settings(backend="crosshair", database=None, max_examples=2)
    def f_observability(value):
        nonlocal called
        if called:
            choices["observability"] = len(
                crosshair.statespace.context_statespace().choices_made
            )
        called = True

    with capture_observations():
        f_observability()

    assert (
        choices["normal"]
        == (choices["observability"] - extra_observability)
        == choices["verbosity"]
    )


def test_provider_conformance_crosshair():
    # Hypothesis can in theory pass values of any type to `realize`, but the
    # default strategy in the conformance test acts too much like a fuzzer for
    # crosshair internals and finds very strange errors.
    _realize_objects = (
        st.integers() | st.floats() | st.booleans() | st.binary() | st.text()
    )
    run_conformance_test(
        CrossHairPrimitiveProvider,
        context_manager_exceptions=(IgnoreAttempt, UnexploredPath, NotDeterministic),
        settings=settings(max_examples=5, stateful_step_count=10),
        _realize_objects=_realize_objects,
    )


def test_realizes_event():
    saw_myevent = False

    def callback(observation):
        if observation.type != "test_case":
            return

        nonlocal saw_myevent
        # crosshair might raise BackendCannotProceed(verified) during
        # generation, in which case event() is never called, so not every
        # observation will have "myevent".
        if "myevent" in observation.features:
            assert isinstance(observation.features["myevent"], int)
            saw_myevent = True

    @given(st.integers())
    @settings(backend="crosshair", max_examples=5)
    def test(n):
        event("myevent", n)

    with with_observability_callback(callback):
        test()

    assert saw_myevent