|
14 | 14 | from random import Random
|
15 | 15 | from typing import Optional
|
16 | 16 |
|
| 17 | +import pytest |
| 18 | + |
17 | 19 | from hypothesis import HealthCheck, Phase, assume, settings, strategies as st
|
18 |
| -from hypothesis.control import current_build_context |
19 |
| -from hypothesis.errors import InvalidArgument |
| 20 | +from hypothesis.control import current_build_context, currently_in_test_context |
20 | 21 | from hypothesis.internal.conjecture import engine as engine_module
|
21 | 22 | from hypothesis.internal.conjecture.choice import ChoiceNode, ChoiceT
|
22 | 23 | from hypothesis.internal.conjecture.data import ConjectureData, Status
|
@@ -103,10 +104,21 @@ def accept(f):
|
103 | 104 |
|
104 | 105 |
|
105 | 106 | def fresh_data(*, random=None, observer=None) -> ConjectureData:
|
| 107 | + context = current_build_context() if currently_in_test_context() else None |
| 108 | + if context is not None and settings().backend == "crosshair": |
| 109 | + # we should reexamine fresh_data sometime and see if we can replace it |
| 110 | + # with nicer and higher level hypothesis idioms. |
| 111 | + # |
| 112 | + # For now it doesn't work well with crosshair tests. This is no big |
| 113 | + # loss, because these tests often rely on hypothesis-provider-specific |
| 114 | + # things. |
| 115 | + pytest.skip( |
| 116 | + "Fresh data is too low level (and too much of a hack) to be " |
| 117 | + "worth supporting when testing with crosshair" |
| 118 | + ) |
| 119 | + |
106 | 120 | if random is None:
|
107 |
| - try: |
108 |
| - context = current_build_context() |
109 |
| - except InvalidArgument: |
| 121 | + if context is None: |
110 | 122 | # ensure usage of fresh_data() is not flaky outside of property tests.
|
111 | 123 | raise ValueError(
|
112 | 124 | "must pass a seeded Random instance to fresh_data() when "
|
|
0 commit comments