Skip to content

Commit 84bd31d

Browse files
authored
New verbosity_test_cases ini option (#11653)
Allow the output of test case execution to be controlled independently from the application verbosity level. `verbosity_test_cases` is the new ini setting that adjusts this functionality. Fix #11639
1 parent a2a9aa6 commit 84bd31d

File tree

6 files changed

+273
-9
lines changed

6 files changed

+273
-9
lines changed

changelog/11653.feature.rst

+2
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
Added the new :confval:`verbosity_test_cases` configuration option for fine-grained control of test execution verbosity.
2+
See :ref:`Fine-grained verbosity <pytest.fine_grained_verbosity>` for more details.

doc/en/how-to/output.rst

+3-1
Original file line numberDiff line numberDiff line change
@@ -325,7 +325,9 @@ This is done by setting a verbosity level in the configuration file for the spec
325325
``pytest --no-header`` with a value of ``2`` would have the same output as the previous example, but each test inside
326326
the file is shown by a single character in the output.
327327

328-
(Note: currently this is the only option available, but more might be added in the future).
328+
:confval:`verbosity_test_cases`: Controls how verbose the test execution output should be when pytest is executed.
329+
Running ``pytest --no-header`` with a value of ``2`` would have the same output as the first verbosity example, but each
330+
test inside the file gets its own line in the output.
329331

330332
.. _`pytest.detailed_failed_tests_usage`:
331333

doc/en/reference/reference.rst

+13
Original file line numberDiff line numberDiff line change
@@ -1865,6 +1865,19 @@ passed multiple times. The expected format is ``name=value``. For example::
18651865
"auto" can be used to explicitly use the global verbosity level.
18661866

18671867

1868+
.. confval:: verbosity_test_cases
1869+
1870+
Set a verbosity level specifically for test case execution related output, overriding the application wide level.
1871+
1872+
.. code-block:: ini
1873+
1874+
[pytest]
1875+
verbosity_test_cases = 2
1876+
1877+
Defaults to application wide verbosity level (via the ``-v`` command-line option). A special value of
1878+
"auto" can be used to explicitly use the global verbosity level.
1879+
1880+
18681881
.. confval:: xfail_strict
18691882

18701883
If set to ``True``, tests marked with ``@pytest.mark.xfail`` that actually succeed will by default fail the

src/_pytest/config/__init__.py

+2
Original file line numberDiff line numberDiff line change
@@ -1657,6 +1657,8 @@ def getvalueorskip(self, name: str, path=None):
16571657

16581658
#: Verbosity type for failed assertions (see :confval:`verbosity_assertions`).
16591659
VERBOSITY_ASSERTIONS: Final = "assertions"
1660+
#: Verbosity type for test case execution (see :confval:`verbosity_test_cases`).
1661+
VERBOSITY_TEST_CASES: Final = "test_cases"
16601662
_VERBOSITY_INI_DEFAULT: Final = "auto"
16611663

16621664
def get_verbosity(self, verbosity_type: Optional[str] = None) -> int:

src/_pytest/terminal.py

+20-8
Original file line numberDiff line numberDiff line change
@@ -255,6 +255,14 @@ def pytest_addoption(parser: Parser) -> None:
255255
"progress even when capture=no)",
256256
default="progress",
257257
)
258+
Config._add_verbosity_ini(
259+
parser,
260+
Config.VERBOSITY_TEST_CASES,
261+
help=(
262+
"Specify a verbosity level for test case execution, overriding the main level. "
263+
"Higher levels will provide more detailed information about each test case executed."
264+
),
265+
)
258266

259267

260268
def pytest_configure(config: Config) -> None:
@@ -408,7 +416,7 @@ def no_summary(self) -> bool:
408416
@property
409417
def showfspath(self) -> bool:
410418
if self._showfspath is None:
411-
return self.verbosity >= 0
419+
return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) >= 0
412420
return self._showfspath
413421

414422
@showfspath.setter
@@ -417,7 +425,7 @@ def showfspath(self, value: Optional[bool]) -> None:
417425

418426
@property
419427
def showlongtestinfo(self) -> bool:
420-
return self.verbosity > 0
428+
return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) > 0
421429

422430
def hasopt(self, char: str) -> bool:
423431
char = {"xfailed": "x", "skipped": "s"}.get(char, char)
@@ -595,7 +603,7 @@ def pytest_runtest_logreport(self, report: TestReport) -> None:
595603
markup = {"yellow": True}
596604
else:
597605
markup = {}
598-
if self.verbosity <= 0:
606+
if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0:
599607
self._tw.write(letter, **markup)
600608
else:
601609
self._progress_nodeids_reported.add(rep.nodeid)
@@ -604,7 +612,7 @@ def pytest_runtest_logreport(self, report: TestReport) -> None:
604612
self.write_ensure_prefix(line, word, **markup)
605613
if rep.skipped or hasattr(report, "wasxfail"):
606614
reason = _get_raw_skip_reason(rep)
607-
if self.config.option.verbose < 2:
615+
if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) < 2:
608616
available_width = (
609617
(self._tw.fullwidth - self._tw.width_of_current_line)
610618
- len(" [100%]")
@@ -641,7 +649,10 @@ def _is_last_item(self) -> bool:
641649

642650
def pytest_runtest_logfinish(self, nodeid: str) -> None:
643651
assert self._session
644-
if self.verbosity <= 0 and self._show_progress_info:
652+
if (
653+
self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0
654+
and self._show_progress_info
655+
):
645656
if self._show_progress_info == "count":
646657
num_tests = self._session.testscollected
647658
progress_length = len(f" [{num_tests}/{num_tests}]")
@@ -819,8 +830,9 @@ def pytest_collection_finish(self, session: "Session") -> None:
819830
rep.toterminal(self._tw)
820831

821832
def _printcollecteditems(self, items: Sequence[Item]) -> None:
822-
if self.config.option.verbose < 0:
823-
if self.config.option.verbose < -1:
833+
test_cases_verbosity = self.config.get_verbosity(Config.VERBOSITY_TEST_CASES)
834+
if test_cases_verbosity < 0:
835+
if test_cases_verbosity < -1:
824836
counts = Counter(item.nodeid.split("::", 1)[0] for item in items)
825837
for name, count in sorted(counts.items()):
826838
self._tw.line("%s: %d" % (name, count))
@@ -840,7 +852,7 @@ def _printcollecteditems(self, items: Sequence[Item]) -> None:
840852
stack.append(col)
841853
indent = (len(stack) - 1) * " "
842854
self._tw.line(f"{indent}{col}")
843-
if self.config.option.verbose >= 1:
855+
if test_cases_verbosity >= 1:
844856
obj = getattr(col, "obj", None)
845857
doc = inspect.getdoc(obj) if obj else None
846858
if doc:

testing/test_terminal.py

+233
Original file line numberDiff line numberDiff line change
@@ -2611,6 +2611,239 @@ def test_format_trimmed() -> None:
26112611
assert _format_trimmed(" ({}) ", msg, len(msg) + 3) == " (unconditional ...) "
26122612

26132613

2614+
class TestFineGrainedTestCase:
2615+
DEFAULT_FILE_CONTENTS = """
2616+
import pytest
2617+
2618+
@pytest.mark.parametrize("i", range(4))
2619+
def test_ok(i):
2620+
'''
2621+
some docstring
2622+
'''
2623+
pass
2624+
2625+
def test_fail():
2626+
assert False
2627+
"""
2628+
LONG_SKIP_FILE_CONTENTS = """
2629+
import pytest
2630+
2631+
@pytest.mark.skip(
2632+
"some long skip reason that will not fit on a single line with other content that goes"
2633+
" on and on and on and on and on"
2634+
)
2635+
def test_skip():
2636+
pass
2637+
"""
2638+
2639+
@pytest.mark.parametrize("verbosity", [1, 2])
2640+
def test_execute_positive(self, verbosity, pytester: Pytester) -> None:
2641+
# expected: one test case per line (with file name), word describing result
2642+
p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=verbosity)
2643+
result = pytester.runpytest(p)
2644+
2645+
result.stdout.fnmatch_lines(
2646+
[
2647+
"collected 5 items",
2648+
"",
2649+
f"{p.name}::test_ok[0] PASSED [ 20%]",
2650+
f"{p.name}::test_ok[1] PASSED [ 40%]",
2651+
f"{p.name}::test_ok[2] PASSED [ 60%]",
2652+
f"{p.name}::test_ok[3] PASSED [ 80%]",
2653+
f"{p.name}::test_fail FAILED [100%]",
2654+
],
2655+
consecutive=True,
2656+
)
2657+
2658+
def test_execute_0_global_1(self, pytester: Pytester) -> None:
2659+
# expected: one file name per line, single character describing result
2660+
p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=0)
2661+
result = pytester.runpytest("-v", p)
2662+
2663+
result.stdout.fnmatch_lines(
2664+
[
2665+
"collecting ... collected 5 items",
2666+
"",
2667+
f"{p.name} ....F [100%]",
2668+
],
2669+
consecutive=True,
2670+
)
2671+
2672+
@pytest.mark.parametrize("verbosity", [-1, -2])
2673+
def test_execute_negative(self, verbosity, pytester: Pytester) -> None:
2674+
# expected: single character describing result
2675+
p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=verbosity)
2676+
result = pytester.runpytest(p)
2677+
2678+
result.stdout.fnmatch_lines(
2679+
[
2680+
"collected 5 items",
2681+
"....F [100%]",
2682+
],
2683+
consecutive=True,
2684+
)
2685+
2686+
def test_execute_skipped_positive_2(self, pytester: Pytester) -> None:
2687+
# expected: one test case per line (with file name), word describing result, full reason
2688+
p = TestFineGrainedTestCase._initialize_files(
2689+
pytester,
2690+
verbosity=2,
2691+
file_contents=TestFineGrainedTestCase.LONG_SKIP_FILE_CONTENTS,
2692+
)
2693+
result = pytester.runpytest(p)
2694+
2695+
result.stdout.fnmatch_lines(
2696+
[
2697+
"collected 1 item",
2698+
"",
2699+
f"{p.name}::test_skip SKIPPED (some long skip",
2700+
"reason that will not fit on a single line with other content that goes",
2701+
"on and on and on and on and on) [100%]",
2702+
],
2703+
consecutive=True,
2704+
)
2705+
2706+
def test_execute_skipped_positive_1(self, pytester: Pytester) -> None:
2707+
# expected: one test case per line (with file name), word describing result, reason truncated
2708+
p = TestFineGrainedTestCase._initialize_files(
2709+
pytester,
2710+
verbosity=1,
2711+
file_contents=TestFineGrainedTestCase.LONG_SKIP_FILE_CONTENTS,
2712+
)
2713+
result = pytester.runpytest(p)
2714+
2715+
result.stdout.fnmatch_lines(
2716+
[
2717+
"collected 1 item",
2718+
"",
2719+
f"{p.name}::test_skip SKIPPED (some long ski...) [100%]",
2720+
],
2721+
consecutive=True,
2722+
)
2723+
2724+
def test_execute_skipped__0_global_1(self, pytester: Pytester) -> None:
2725+
# expected: one file name per line, single character describing result (no reason)
2726+
p = TestFineGrainedTestCase._initialize_files(
2727+
pytester,
2728+
verbosity=0,
2729+
file_contents=TestFineGrainedTestCase.LONG_SKIP_FILE_CONTENTS,
2730+
)
2731+
result = pytester.runpytest("-v", p)
2732+
2733+
result.stdout.fnmatch_lines(
2734+
[
2735+
"collecting ... collected 1 item",
2736+
"",
2737+
f"{p.name} s [100%]",
2738+
],
2739+
consecutive=True,
2740+
)
2741+
2742+
@pytest.mark.parametrize("verbosity", [-1, -2])
2743+
def test_execute_skipped_negative(self, verbosity, pytester: Pytester) -> None:
2744+
# expected: single character describing result (no reason)
2745+
p = TestFineGrainedTestCase._initialize_files(
2746+
pytester,
2747+
verbosity=verbosity,
2748+
file_contents=TestFineGrainedTestCase.LONG_SKIP_FILE_CONTENTS,
2749+
)
2750+
result = pytester.runpytest(p)
2751+
2752+
result.stdout.fnmatch_lines(
2753+
[
2754+
"collected 1 item",
2755+
"s [100%]",
2756+
],
2757+
consecutive=True,
2758+
)
2759+
2760+
@pytest.mark.parametrize("verbosity", [1, 2])
2761+
def test__collect_only_positive(self, verbosity, pytester: Pytester) -> None:
2762+
p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=verbosity)
2763+
result = pytester.runpytest("--collect-only", p)
2764+
2765+
result.stdout.fnmatch_lines(
2766+
[
2767+
"collected 5 items",
2768+
"",
2769+
f"<Dir {p.parent.name}>",
2770+
f" <Module {p.name}>",
2771+
" <Function test_ok[0]>",
2772+
" some docstring",
2773+
" <Function test_ok[1]>",
2774+
" some docstring",
2775+
" <Function test_ok[2]>",
2776+
" some docstring",
2777+
" <Function test_ok[3]>",
2778+
" some docstring",
2779+
" <Function test_fail>",
2780+
],
2781+
consecutive=True,
2782+
)
2783+
2784+
def test_collect_only_0_global_1(self, pytester: Pytester) -> None:
2785+
p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=0)
2786+
result = pytester.runpytest("-v", "--collect-only", p)
2787+
2788+
result.stdout.fnmatch_lines(
2789+
[
2790+
"collecting ... collected 5 items",
2791+
"",
2792+
f"<Dir {p.parent.name}>",
2793+
f" <Module {p.name}>",
2794+
" <Function test_ok[0]>",
2795+
" <Function test_ok[1]>",
2796+
" <Function test_ok[2]>",
2797+
" <Function test_ok[3]>",
2798+
" <Function test_fail>",
2799+
],
2800+
consecutive=True,
2801+
)
2802+
2803+
def test_collect_only_negative_1(self, pytester: Pytester) -> None:
2804+
p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=-1)
2805+
result = pytester.runpytest("--collect-only", p)
2806+
2807+
result.stdout.fnmatch_lines(
2808+
[
2809+
"collected 5 items",
2810+
"",
2811+
f"{p.name}::test_ok[0]",
2812+
f"{p.name}::test_ok[1]",
2813+
f"{p.name}::test_ok[2]",
2814+
f"{p.name}::test_ok[3]",
2815+
f"{p.name}::test_fail",
2816+
],
2817+
consecutive=True,
2818+
)
2819+
2820+
def test_collect_only_negative_2(self, pytester: Pytester) -> None:
2821+
p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=-2)
2822+
result = pytester.runpytest("--collect-only", p)
2823+
2824+
result.stdout.fnmatch_lines(
2825+
[
2826+
"collected 5 items",
2827+
"",
2828+
f"{p.name}: 5",
2829+
],
2830+
consecutive=True,
2831+
)
2832+
2833+
@staticmethod
2834+
def _initialize_files(
2835+
pytester: Pytester, verbosity: int, file_contents: str = DEFAULT_FILE_CONTENTS
2836+
) -> Path:
2837+
p = pytester.makepyfile(file_contents)
2838+
pytester.makeini(
2839+
f"""
2840+
[pytest]
2841+
verbosity_test_cases = {verbosity}
2842+
"""
2843+
)
2844+
return p
2845+
2846+
26142847
def test_summary_xfail_reason(pytester: Pytester) -> None:
26152848
pytester.makepyfile(
26162849
"""

0 commit comments

Comments
 (0)