Added seeding to draws in tests #195

Merged 1 commit on Jun 12, 2023
@@ -18,7 +18,7 @@ def transition_probability_tests(steps, n_states, n_lags, n_draws, atol):
         P=pt.as_tensor_variable(P), init_dist=x0, steps=steps, n_lags=n_lags
     )
 
-    draws = pm.draw(chain, n_draws)
+    draws = pm.draw(chain, n_draws, random_seed=172)
 
     # Test x0 is uniform over n_states
     for i in range(n_lags):
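
For context, pm.draw accepts a random_seed argument (an integer or a numpy.random.Generator); passing it pins the RNG state so a test's draws are reproducible across runs. A minimal sketch of that behavior, using a hypothetical standalone variable rather than anything from this diff:

import numpy as np
import pymc as pm

x = pm.Normal.dist(0, 1)                  # hypothetical example variable
a = pm.draw(x, draws=3, random_seed=172)
b = pm.draw(x, draws=3, random_seed=172)
np.testing.assert_array_equal(a, b)       # same integer seed, identical draws
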
15 changes: 9 additions & 6 deletions pymc_experimental/tests/model_transform/test_conditioning.py
@@ -92,12 +92,13 @@ def test_observe_dims():
 
 
 def test_do():
+    rng = np.random.default_rng(seed=435)
     with pm.Model() as m_old:
         x = pm.Normal("x", 0, 1e-3)
         y = pm.Normal("y", x, 1e-3)
         z = pm.Normal("z", y + x, 1e-3)
 
-    assert -5 < pm.draw(z) < 5
+    assert -5 < pm.draw(z, random_seed=rng) < 5

Member:
These ones didn't need seeding, but no harm I guess. They were selected to be many standard deviations away from the constraint.

Contributor Author:
I noticed that, but in my recent and limited experience with the project I've seen random test failures get flagged as flaky-test issues, so I thought it would be better to seed. Do you have a sense of what probability of test failure the project would accept?

ricardoV94 (Member), Jun 12, 2023:
Those are the sum of 3 Normals each with 1e-3 standard deviation, so to get out of the (-5, 5) range, it should take a couple of universe lifetimes :)
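
A quick back-of-the-envelope check of that claim, taking the comment's framing of three independent Normals at face value (the exact variance of z is slightly larger, since the three terms are correlated):

import numpy as np
from scipy import stats

# z is roughly a sum of three independent Normals with sd 1e-3 each
sd = np.sqrt(3) * 1e-3                 # standard deviation of the sum
z_score = 5 / sd                       # ~2887 standard deviations to the boundary
log10_p = stats.norm.logsf(z_score) / np.log(10)  # sf itself underflows to 0.0
print(log10_p)                         # about -1.8e6, i.e. P ~ 10**(-1.8 million)

So "a couple of universe lifetimes" is, if anything, generous.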

Contributor Author:
Haha, yes, in this case the chance is very small. For my own learning, I just wondered where (approximately) you might draw the line.


     m_new = do(m_old, {y: x + 100})
 
@@ -106,7 +107,7 @@ def test_do():
     assert m_new["y"] in m_new.deterministics
     assert m_new["z"] in m_new.free_RVs
 
-    assert 95 < pm.draw(m_new["z"]) < 105
+    assert 95 < pm.draw(m_new["z"], random_seed=rng) < 105
 
     # Test two substitutions
     with m_old:
@@ -118,10 +119,10 @@ def test_do():
     assert m_new["x"] not in m_new.deterministics
     assert m_new["z"] in m_new.free_RVs
 
-    assert 195 < pm.draw(m_new["z"]) < 205
+    assert 195 < pm.draw(m_new["z"], random_seed=rng) < 205
     with m_new:
         pm.set_data({"switch": 0})
-    assert -5 < pm.draw(m_new["z"]) < 5
+    assert -5 < pm.draw(m_new["z"], random_seed=rng) < 5
 
 
 def test_do_posterior_predictive():
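
A note on the pattern adopted above: creating one np.random.default_rng(seed=...) and handing it to every pm.draw call keeps the whole test reproducible while still letting each call return a different value, because pm.draw consumes state from the generator. A minimal sketch of that pattern, again with a hypothetical standalone variable:

import numpy as np
import pymc as pm

x = pm.Normal.dist(0, 1)                # hypothetical example variable

rng = np.random.default_rng(seed=435)
first = pm.draw(x, random_seed=rng)     # consumes state from rng
second = pm.draw(x, random_seed=rng)    # a different value than `first`

# Re-creating the generator with the same seed replays the sequence,
# which is what makes the seeded asserts above deterministic.
rng_replay = np.random.default_rng(seed=435)
print(first == pm.draw(x, random_seed=rng_replay))  # expected: True
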
@@ -149,22 +150,24 @@ def test_do_posterior_predictive():
 
 @pytest.mark.parametrize("mutable", (False, True))
 def test_do_constant(mutable):
+    rng = np.random.default_rng(seed=122)
     with pm.Model() as m:
         x = pm.Data("x", 0, mutable=mutable)
         y = pm.Normal("y", x, 1e-3)
 
     do_m = do(m, {x: 105})
-    assert pm.draw(do_m["y"]) > 100
+    assert pm.draw(do_m["y"], random_seed=rng) > 100
 
 
 def test_do_deterministic():
+    rng = np.random.default_rng(seed=435)
     with pm.Model() as m:
         x = pm.Normal("x", 0, 1e-3)
         y = pm.Deterministic("y", x + 105)
         z = pm.Normal("z", y, 1e-3)
 
     do_m = do(m, {"z": x - 105})
-    assert pm.draw(do_m["z"]) < 100
+    assert pm.draw(do_m["z"], random_seed=rng) < 100
 
 
 def test_do_dims():
2 changes: 1 addition & 1 deletion pymc_experimental/tests/utils/test_model_fgraph.py
@@ -124,7 +124,7 @@ def test_data(inline_views):
         pm.set_data({"x": [100.0, 200.0]}, coords={"test_dim": range(2)})
 
     assert m_new.dim_lengths["test_dim"].eval() == 2
-    np.testing.assert_array_almost_equal(pm.draw(m_new["x"]), [100.0, 200.0])
+    np.testing.assert_array_almost_equal(pm.draw(m_new["x"], random_seed=63), [100.0, 200.0])
 
 
 @pytest.mark.parametrize("inline_views", (False, True))