Reenable more tests #4770

Merged: 2 commits, Jun 15, 2021
25 changes: 9 additions & 16 deletions .github/workflows/pytest.yml
@@ -13,32 +13,25 @@ jobs:
floatx: [float32, float64]
test-subset:
# Tests are split into multiple jobs to accelerate the CI.
# The first job (starting in the next block) shouldn't run any tests; it
# only ignores tests that don't work at all or that run in other jobs.
#
# How this works:
# 1st block: Only passes --ignore parameters to pytest.
#            → pytest will run all test_*.py files that are NOT ignored.
# Other blocks: Only pass paths to test files.
#            → pytest will run only these files.
#
# Any test that was not ignored runs in the first job.
# A pre-commit hook (scripts/check_all_tests_are_covered.py) enforces that
# each test runs just once.

# Because YAML doesn't allow comments in the blocks below, here they are:
# 1st block: These tests are temporarily disabled, because they are _very_ broken.
# 2nd block: The JAX tests run through their own workflow: jaxtests.yml
# 3rd & 4th: These tests are covered by other matrix jobs.
# 5th block: These tests PASS without a single XFAIL.
# 6th block: These have some XFAILs.
# A pre-commit hook (scripts/check_all_tests_are_covered.py)
# enforces that each test runs just once.
- |
--ignore=pymc3/tests/test_distributions_timeseries.py
--ignore=pymc3/tests/test_missing.py
--ignore=pymc3/tests/test_mixture.py
--ignore=pymc3/tests/test_model_graph.py
--ignore=pymc3/tests/test_modelcontext.py
--ignore=pymc3/tests/test_parallel_sampling.py
--ignore=pymc3/tests/test_profile.py
--ignore=pymc3/tests/test_random.py
--ignore=pymc3/tests/test_shared.py
--ignore=pymc3/tests/test_smc.py
--ignore=pymc3/tests/test_starting.py
--ignore=pymc3/tests/test_step.py
--ignore=pymc3/tests/test_tracetab.py
--ignore=pymc3/tests/test_tuning.py
--ignore=pymc3/tests/test_types.py
--ignore=pymc3/tests/test_variational_inference.py
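The pre-commit hook referenced in the comments above, scripts/check_all_tests_are_covered.py, is not part of this diff. As a rough illustration of the kind of check it performs, the sketch below parses the workflow matrix and flags tests that would run more than once; the job and key names are assumptions, not the actual implementation.

import re
from pathlib import Path

import yaml  # PyYAML

WORKFLOW = Path(".github/workflows/pytest.yml")


def main() -> None:
    config = yaml.safe_load(WORKFLOW.read_text())
    # Assumed workflow structure: a "pytest" job using the test-subset matrix above.
    subsets = config["jobs"]["pytest"]["strategy"]["matrix"]["test-subset"]

    ignored = set(re.findall(r"--ignore=(\S+)", subsets[0]))
    listed = [path for block in subsets[1:] for path in block.split()]

    # The first block runs everything that is not ignored, so a test listed in a
    # later block must also be ignored there, and must not be listed twice.
    runs_twice = set(listed) - ignored
    duplicated = {path for path in listed if listed.count(path) > 1}

    assert not runs_twice, f"Would run twice: {sorted(runs_twice)}"
    assert not duplicated, f"Listed in more than one block: {sorted(duplicated)}"


if __name__ == "__main__":
    main()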
7 changes: 6 additions & 1 deletion pymc3/tests/test_missing.py
@@ -121,7 +121,12 @@ def test_interval_missing_observations():

assert {"theta1", "theta2"} <= set(prior_trace.keys())

trace = sample(chains=1, draws=50, compute_convergence_checks=False)
trace = sample(
chains=1,
draws=50,
compute_convergence_checks=False,
return_inferencedata=False,
)

assert np.all(0 < trace["theta1_missing"].mean(0))
assert np.all(0 < trace["theta2_missing"].mean(0))
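Aside, not part of the diff: passing return_inferencedata=False keeps pm.sample() returning a MultiTrace, which supports the dict-style indexing used in the assertions above (trace["theta1_missing"]); an arviz.InferenceData return value would have to be accessed through its posterior group instead. A minimal sketch with a hypothetical one-variable model:

import pymc3 as pm

with pm.Model():
    theta1 = pm.Normal("theta1", 0.0, 1.0)

    # MultiTrace: dict-style access, as in the test above.
    trace = pm.sample(chains=1, draws=50, return_inferencedata=False,
                      compute_convergence_checks=False)
    print(trace["theta1"].shape)  # (50,)

    # InferenceData: samples live under the posterior group.
    idata = pm.sample(chains=1, draws=50, return_inferencedata=True,
                      compute_convergence_checks=False)
    print(idata.posterior["theta1"].shape)  # (chains, draws) = (1, 50)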
22 changes: 15 additions & 7 deletions pymc3/tests/test_starting.py
@@ -13,8 +13,7 @@
# limitations under the License.

import numpy as np

from pytest import raises
import pytest

from pymc3 import (
Beta,
@@ -47,6 +46,7 @@ def test_accuracy_non_normal():
close_to(newstart["x"], mu, select_by_precision(float64=1e-5, float32=1e-4))


@pytest.mark.xfail(reason="find_MAP fails with derivatives")
def test_find_MAP_discrete():
tol = 2.0 ** -11
alpha = 4
@@ -68,12 +68,15 @@ def test_find_MAP_discrete():
assert map_est2["ss"] == 14


@pytest.mark.xfail(reason="find_MAP fails with derivatives")
def test_find_MAP_no_gradient():
_, model = simple_arbitrary_det()
with model:
find_MAP()


@pytest.mark.skip(reason="test is slow because it's failing")
@pytest.mark.xfail(reason="find_MAP fails with derivatives")
def test_find_MAP():
tol = 2.0 ** -11 # 16 bit machine epsilon, a low bar
data = np.random.randn(100)
@@ -106,8 +109,8 @@ def test_find_MAP_issue_4488():
map_estimate = find_MAP()

assert not set.difference({"x_missing", "x_missing_log__", "y"}, set(map_estimate.keys()))
assert np.isclose(map_estimate["x_missing"], 0.2)
np.testing.assert_array_equal(map_estimate["y"], [2.0, map_estimate["x_missing"][0] + 1])
np.testing.assert_allclose(map_estimate["x_missing"], 0.2, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(map_estimate["y"], [2.0, map_estimate["x_missing"][0] + 1])


def test_allinmodel():
Expand All @@ -120,11 +123,16 @@ def test_allinmodel():
x2 = Normal("x2", mu=0, sigma=1)
y2 = Normal("y2", mu=0, sigma=1)

x1 = model1.rvs_to_values[x1]
y1 = model1.rvs_to_values[y1]
x2 = model2.rvs_to_values[x2]
y2 = model2.rvs_to_values[y2]

starting.allinmodel([x1, y1], model1)
starting.allinmodel([x1], model1)
with raises(ValueError, match=r"Some variables not in the model: \['x2', 'y2'\]"):
with pytest.raises(ValueError, match=r"Some variables not in the model: \['x2', 'y2'\]"):
starting.allinmodel([x2, y2], model1)
with raises(ValueError, match=r"Some variables not in the model: \['x2'\]"):
with pytest.raises(ValueError, match=r"Some variables not in the model: \['x2'\]"):
starting.allinmodel([x2, y1], model1)
with raises(ValueError, match=r"Some variables not in the model: \['x2'\]"):
with pytest.raises(ValueError, match=r"Some variables not in the model: \['x2'\]"):
starting.allinmodel([x2], model1)
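Note, not part of the diff: on this development branch the model's random variables and the value variables that routines such as starting.allinmodel receive are distinct objects, which is why the test now maps each variable through model.rvs_to_values first. A minimal sketch of that pattern, assuming the same API used in the diff above:

import pymc3 as pm

with pm.Model() as model:
    x = pm.Normal("x", mu=0, sigma=1)

# `x` is the random-variable node in the graph; the corresponding value
# variable is what start-point / MAP routines operate on.
x_value = model.rvs_to_values[x]
print(x_value.name)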