Skip to content

Commit 7cf6640

Browse files
Analyze included/excluded test files based on YAML job matrix
Compared to the previous regex-based approach, this can distinguish by OS and floatX setting, allowing for more informative outputs.
1 parent 7be2025 commit 7cf6640

File tree

2 files changed

+79
-23
lines changed

2 files changed

+79
-23
lines changed

.pre-commit-config.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@ repos:
3636
- repo: local
3737
hooks:
3838
- id: check-no-tests-are-ignored
39+
additional_dependencies: [pandas,pyyaml]
3940
entry: python scripts/check_all_tests_are_covered.py
4041
files: ^\.github/workflows/pytest\.yml$
4142
language: python

scripts/check_all_tests_are_covered.py

Lines changed: 78 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -6,38 +6,93 @@
66
This is intended to be used as a pre-commit hook, see `.pre-commit-config.yaml`.
77
You can run it manually with `pre-commit run check-no-tests-are-ignored --all`.
88
"""
9+
import itertools
910
import logging
10-
import re
11+
import os
1112

1213
from pathlib import Path
1314

15+
import pandas
16+
import yaml
17+
1418
_log = logging.getLogger(__file__)
19+
logging.basicConfig(level=logging.DEBUG)
1520

1621

17-
if __name__ == "__main__":
18-
testing_workflows = ["jaxtests.yml", "pytest.yml"]
19-
ignored = set()
20-
non_ignored = set()
21-
for wfyml in testing_workflows:
22-
pytest_ci_job = Path(".github") / "workflows" / wfyml
23-
txt = pytest_ci_job.read_text()
24-
ignored = set(re.findall(r"(?<=--ignore=)(pymc3/tests.*\.py)", txt))
25-
non_ignored = non_ignored.union(set(re.findall(r"(?<!--ignore=)(pymc3/tests.*\.py)", txt)))
26-
# Summarize
27-
ignored_by_all = ignored.difference(non_ignored)
28-
run_multiple_times = non_ignored.difference(ignored)
22+
def find_testfiles():
    """Collect the repo-relative paths of all test modules.

    Returns
    -------
    set of str
        POSIX-style paths like ``pymc3/tests/test_xyz.py`` relative to the
        repository root (``os.sep`` is normalized to ``/`` for portability).
    """
    repo_root = Path(__file__).parent.parent
    test_dir = repo_root / "pymc3" / "tests"
    found = set()
    for fp in test_dir.glob("**/test_*.py"):
        # Normalize to forward slashes so paths match the workflow YAML entries.
        found.add(str(fp.relative_to(repo_root)).replace(os.sep, "/"))
    _log.info("Found %i tests in total.", len(found))
    return found
30+
31+
32+
def from_yaml():
    """Determines how often each test file is run per platform and floatX setting.

    Reads the job matrices from the GitHub Actions workflow files, counts how
    often every test file is included per (floatX, os) combination, and logs a
    summary table.

    Raises
    ------
    Exception
        If any test file is run multiple times with the same OS and floatX setting.
    """
    # First collect the matrix definitions from testing workflows
    matrices = {}
    for wf in ["pytest.yml", "windows.yml", "arviz_compat.yml", "jaxtests.yml"]:
        # Path.stem removes the ".yml" extension exactly; str.strip(".yml")
        # would strip any leading/trailing characters from the set {., y, m, l}.
        wfname = Path(wf).stem
        # Context manager ensures the workflow file handle is closed.
        with open(Path(".github", "workflows", wf)) as fh:
            wfdef = yaml.safe_load(fh)
        for jobname, jobdef in wfdef["jobs"].items():
            matrix = jobdef.get("strategy", {}).get("matrix", {})
            if matrix:
                matrices[(wfname, jobname)] = matrix
            else:
                _log.warning("No matrix in %s/%s", wf, jobname)

    # Now create an empty DataFrame to count based on OS/floatX/testfile
    all_os = []
    all_floatX = []
    for matrix in matrices.values():
        all_os += matrix["os"]
        all_floatX += matrix["floatx"]
    all_os = tuple(sorted(set(all_os)))
    all_floatX = tuple(sorted(set(all_floatX)))
    all_tests = find_testfiles()

    df = pandas.DataFrame(
        columns=pandas.MultiIndex.from_product(
            [sorted(all_floatX), sorted(all_os)], names=["floatX", "os"]
        ),
        index=pandas.Index(sorted(all_tests), name="testfile"),
    )
    df.loc[:, :] = 0

    # Count how often the testfiles are included in job definitions
    for matrix in matrices.values():
        for os_, floatX, subset in itertools.product(
            matrix["os"], matrix["floatx"], matrix["test-subset"]
        ):
            testfiles = subset.split("\n")
            # Slice off the exact "--ignore=" prefix; str.strip("--ignore=")
            # would strip arbitrary characters from {-, i, g, n, o, r, e, =}
            # off BOTH ends of the path, corrupting some file names.
            ignored = {
                item[len("--ignore=") :]
                for item in testfiles
                if item.startswith("--ignore=")
            }
            included = {item for item in testfiles if item and not item.startswith("--ignore")}
            if ignored and not included:
                # if no testfile is specified explicitly pytest runs all except the ignored ones
                included = all_tests - ignored

            for testfile in included:
                df.loc[testfile, (floatX, os_)] += 1

    ignored_by_all = set(df[df.eq(0).all(axis=1)].index)
    run_multiple_times = set(df[df.gt(1).any(axis=1)].index)

    # Print summary, warnings and raise errors on unwanted configurations
    _log.info("Number of test runs (❌=0, ✅=once)\n%s", df.replace(0, "❌").replace(1, "✅"))

    if ignored_by_all:
        _log.warning("%i tests are completely ignored:\n%s", len(ignored_by_all), ignored_by_all)
    if run_multiple_times:
        raise Exception(
            f"{len(run_multiple_times)} tests are run multiple times with the same OS and floatX setting:\n{run_multiple_times}"
        )
    return
95+
4096

41-
# Temporarily disabled as we're bringing features back for v4:
42-
# assert not ignored_by_all
43-
assert not run_multiple_times
97+
if __name__ == "__main__":
    # Entry point for the pre-commit hook: analyze the workflow job matrices
    # and raise if any test runs twice with the same OS/floatX configuration.
    from_yaml()

0 commit comments

Comments
 (0)