 This is intended to be used as a pre-commit hook, see `.pre-commit-config.yaml`.
 You can run it manually with `pre-commit run check-no-tests-are-ignored --all`.
 """
+import itertools
 import logging
-import re
+import os

 from pathlib import Path

+import pandas
+import yaml
+
 _log = logging.getLogger(__file__)
+logging.basicConfig(level=logging.DEBUG)


-if __name__ == "__main__":
-    testing_workflows = ["jaxtests.yml", "pytest.yml"]
-    ignored = set()
-    non_ignored = set()
-    for wfyml in testing_workflows:
-        pytest_ci_job = Path(".github") / "workflows" / wfyml
-        txt = pytest_ci_job.read_text()
-        ignored = set(re.findall(r"(?<=--ignore=)(pymc3/tests.*\.py)", txt))
-        non_ignored = non_ignored.union(set(re.findall(r"(?<!--ignore=)(pymc3/tests.*\.py)", txt)))
-    # Summarize
-    ignored_by_all = ignored.difference(non_ignored)
-    run_multiple_times = non_ignored.difference(ignored)
+def find_testfiles():
+    dp_repo = Path(__file__).parent.parent
+    all_tests = {
+        str(fp.relative_to(dp_repo)).replace(os.sep, "/")
+        for fp in (dp_repo / "pymc3" / "tests").glob("**/test_*.py")
+    }
+    _log.info("Found %i tests in total.", len(all_tests))
+    return all_tests
+
+
+def from_yaml():
+    """Determines how often each test file is run per platform and floatX setting.
+
+    An exception is raised if tests run multiple times with the same configuration.
+    """
+    # First collect the matrix definitions from testing workflows
+    matrices = {}
+    for wf in ["pytest.yml", "windows.yml", "arviz_compat.yml", "jaxtests.yml"]:
+        wfname = wf.replace(".yml", "")
+        wfdef = yaml.safe_load(open(Path(".github", "workflows", wf)))
+        for jobname, jobdef in wfdef["jobs"].items():
+            matrix = jobdef.get("strategy", {}).get("matrix", {})
+            if matrix:
+                matrices[(wfname, jobname)] = matrix
+            else:
+                _log.warning("No matrix in %s/%s", wf, jobname)
+
+    # Now create an empty DataFrame to count based on OS/floatX/testfile
+    all_os = []
+    all_floatX = []
+    for matrix in matrices.values():
+        all_os += matrix["os"]
+        all_floatX += matrix["floatx"]
+    all_os = tuple(sorted(set(all_os)))
+    all_floatX = tuple(sorted(set(all_floatX)))
+    all_tests = find_testfiles()
+
+    df = pandas.DataFrame(
+        columns=pandas.MultiIndex.from_product(
+            [sorted(all_floatX), sorted(all_os)], names=["floatX", "os"]
+        ),
+        index=pandas.Index(sorted(all_tests), name="testfile"),
+    )
+    df.loc[:, :] = 0
+
+    # Count how often the testfiles are included in job definitions
+    for matrix in matrices.values():
+        for os_, floatX, subset in itertools.product(
+            matrix["os"], matrix["floatx"], matrix["test-subset"]
+        ):
+            testfiles = subset.split("\n")
+            ignored = {item.replace("--ignore=", "") for item in testfiles if item.startswith("--ignore")}
+            included = {item for item in testfiles if item and not item.startswith("--ignore")}
+            if ignored and not included:
+                # If no testfile is specified explicitly, pytest runs all except the ignored ones
+                included = all_tests - ignored
+
+            for testfile in included:
+                df.loc[testfile, (floatX, os_)] += 1
+
+    ignored_by_all = set(df[df.eq(0).all(axis=1)].index)
+    run_multiple_times = set(df[df.gt(1).any(axis=1)].index)
+
+    # Print summary, warnings and raise errors on unwanted configurations
+    _log.info("Number of test runs (❌=0, ✅=once)\n%s", df.replace(0, "❌").replace(1, "✅"))

     if ignored_by_all:
-        _log.warning(
-            f"The following {len(ignored_by_all)} tests are completely ignored: {ignored_by_all}"
-        )
+        _log.warning("%i tests are completely ignored:\n%s", len(ignored_by_all), ignored_by_all)
     if run_multiple_times:
-        _log.warning(
-            f"The following {len(run_multiple_times)} tests are run multiple times: {run_multiple_times}"
+        raise Exception(
+            f"{len(run_multiple_times)} tests are run multiple times with the same OS and floatX setting:\n{run_multiple_times}"
         )
-    if not (ignored_by_all or run_multiple_times):
-        print(f"✔ All tests will run exactly once.")
+    return
+

-    # Temporarily disabled as we're bringing features back for v4:
-    # assert not ignored_by_all
-    assert not run_multiple_times
+if __name__ == "__main__":
+    from_yaml()
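
The script works on the plain Python structures that `yaml.safe_load` returns for each job's `strategy.matrix`. The sketch below uses a made-up matrix (the `os`, `floatx`, and `test-subset` values are illustrative, not copied from the real workflow files) to show how a `test-subset` entry expands into the set of test files that actually runs, following the same rule as `from_yaml()`: an entry consisting only of `--ignore=` lines means pytest runs every test file except the ignored ones.

```python
# Minimal sketch with fabricated values; mirrors the expansion logic in from_yaml().
matrix = {
    "os": ["ubuntu-latest"],
    "floatx": ["float32", "float64"],
    "test-subset": [
        # Explicit list: only these files run.
        "pymc3/tests/test_distributions.py\npymc3/tests/test_sampling.py",
        # Only --ignore lines: pytest runs everything except the ignored files.
        "--ignore=pymc3/tests/test_distributions.py",
    ],
}
all_tests = {"pymc3/tests/test_distributions.py", "pymc3/tests/test_sampling.py"}

for subset in matrix["test-subset"]:
    items = subset.split("\n")
    ignored = {i.replace("--ignore=", "") for i in items if i.startswith("--ignore")}
    included = {i for i in items if i and not i.startswith("--ignore")}
    if ignored and not included:
        included = all_tests - ignored
    print(sorted(included))
```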
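The bookkeeping itself is a `pandas.DataFrame` whose columns form a `MultiIndex` over every (floatX, os) pair and whose rows are test files; a file is covered correctly when every cell in its row equals 1. Below is a self-contained sketch of that check on toy data rather than the real test suite.

```python
import pandas

# Two fabricated test files across two (floatX, os) configurations.
columns = pandas.MultiIndex.from_product(
    [["float32", "float64"], ["ubuntu-latest"]], names=["floatX", "os"]
)
df = pandas.DataFrame(
    [[1, 1], [0, 2]],
    index=pandas.Index(["tests/test_a.py", "tests/test_b.py"], name="testfile"),
    columns=columns,
)

# Same criteria as the script: a row of zeros means the file never runs,
# a value above 1 means it runs twice with an identical configuration.
ignored_by_all = set(df[df.eq(0).all(axis=1)].index)      # empty for this toy frame
run_multiple_times = set(df[df.gt(1).any(axis=1)].index)  # {"tests/test_b.py"}
print(ignored_by_all, run_multiple_times)
```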