New verbosity_test_cases ini option (pytest-dev#11653)
Allow the output of test case execution to be controlled independently of the application-wide verbosity level.

`verbosity_test_cases` is the new ini setting that adjusts this behavior.

Fix pytest-dev#11639
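
For example (a minimal sketch), a project could keep the global verbosity at its default while giving each executed test case its own output line:

    [pytest]
    verbosity_test_cases = 2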
plannigan authored and flying-sheep committed Apr 9, 2024
1 parent a5a737f commit 71a95be
Showing 6 changed files with 273 additions and 9 deletions.
2 changes: 2 additions & 0 deletions changelog/11653.feature.rst
@@ -0,0 +1,2 @@
Added the new :confval:`verbosity_test_cases` configuration option for fine-grained control of test execution verbosity.
See :ref:`Fine-grained verbosity <pytest.fine_grained_verbosity>` for more details.
4 changes: 3 additions & 1 deletion doc/en/how-to/output.rst
@@ -325,7 +325,9 @@ This is done by setting a verbosity level in the configuration file for the spec
``pytest --no-header`` with a value of ``2`` would have the same output as the previous example, but each test inside
the file is shown by a single character in the output.

(Note: currently this is the only option available, but more might be added in the future).
:confval:`verbosity_test_cases`: Controls how verbose the test execution output should be when pytest is executed.
Running ``pytest --no-header`` with a value of ``2`` would have the same output as the first verbosity example, but each
test inside the file gets its own line in the output.
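
For example (a minimal sketch of the configuration assumed by this paragraph):

.. code-block:: ini

    [pytest]
    verbosity_test_cases = 2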

.. _`pytest.detailed_failed_tests_usage`:

13 changes: 13 additions & 0 deletions doc/en/reference/reference.rst
@@ -1865,6 +1865,19 @@ passed multiple times. The expected format is ``name=value``. For example::
"auto" can be used to explicitly use the global verbosity level.


.. confval:: verbosity_test_cases

   Set a verbosity level specifically for output related to test case execution, overriding the application-wide level.

   .. code-block:: ini

        [pytest]
        verbosity_test_cases = 2

   Defaults to the application-wide verbosity level (via the ``-v`` command-line option). A special value of
   "auto" can be used to explicitly use the global verbosity level.


.. confval:: xfail_strict

   If set to ``True``, tests marked with ``@pytest.mark.xfail`` that actually succeed will by default fail the
2 changes: 2 additions & 0 deletions src/_pytest/config/__init__.py
@@ -1657,6 +1657,8 @@ def getvalueorskip(self, name: str, path=None):

    #: Verbosity type for failed assertions (see :confval:`verbosity_assertions`).
    VERBOSITY_ASSERTIONS: Final = "assertions"
    #: Verbosity type for test case execution (see :confval:`verbosity_test_cases`).
    VERBOSITY_TEST_CASES: Final = "test_cases"
    _VERBOSITY_INI_DEFAULT: Final = "auto"

    def get_verbosity(self, verbosity_type: Optional[str] = None) -> int:
28 changes: 20 additions & 8 deletions src/_pytest/terminal.py
@@ -255,6 +255,14 @@ def pytest_addoption(parser: Parser) -> None:
"progress even when capture=no)",
default="progress",
)
Config._add_verbosity_ini(
parser,
Config.VERBOSITY_TEST_CASES,
help=(
"Specify a verbosity level for test case execution, overriding the main level. "
"Higher levels will provide more detailed information about each test case executed."
),
)
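
A minimal sketch (not part of this diff) of how the registered setting is consumed elsewhere: ``Config.get_verbosity`` returns the ``verbosity_test_cases`` ini value, falling back to the global ``-v`` level when the value is "auto" or unset. The helper name below is hypothetical.

    from _pytest.config import Config

    def report_verbosity(config: Config) -> int:  # hypothetical helper, not in pytest
        # "auto"/unset resolves to the global verbosity level
        return config.get_verbosity(Config.VERBOSITY_TEST_CASES)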


def pytest_configure(config: Config) -> None:
@@ -408,7 +416,7 @@ def no_summary(self) -> bool:
    @property
    def showfspath(self) -> bool:
        if self._showfspath is None:
            return self.verbosity >= 0
            return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) >= 0
        return self._showfspath

    @showfspath.setter
@@ -417,7 +425,7 @@ def showfspath(self, value: Optional[bool]) -> None:

    @property
    def showlongtestinfo(self) -> bool:
        return self.verbosity > 0
        return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) > 0

    def hasopt(self, char: str) -> bool:
        char = {"xfailed": "x", "skipped": "s"}.get(char, char)
@@ -595,7 +603,7 @@ def pytest_runtest_logreport(self, report: TestReport) -> None:
markup = {"yellow": True}
else:
markup = {}
if self.verbosity <= 0:
if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0:
self._tw.write(letter, **markup)
else:
self._progress_nodeids_reported.add(rep.nodeid)
@@ -604,7 +612,7 @@ def pytest_runtest_logreport(self, report: TestReport) -> None:
                self.write_ensure_prefix(line, word, **markup)
                if rep.skipped or hasattr(report, "wasxfail"):
                    reason = _get_raw_skip_reason(rep)
                    if self.config.option.verbose < 2:
                    if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) < 2:
                        available_width = (
                            (self._tw.fullwidth - self._tw.width_of_current_line)
                            - len(" [100%]")
@@ -641,7 +649,10 @@ def _is_last_item(self) -> bool:

    def pytest_runtest_logfinish(self, nodeid: str) -> None:
        assert self._session
        if self.verbosity <= 0 and self._show_progress_info:
        if (
            self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0
            and self._show_progress_info
        ):
            if self._show_progress_info == "count":
                num_tests = self._session.testscollected
                progress_length = len(f" [{num_tests}/{num_tests}]")
@@ -819,8 +830,9 @@ def pytest_collection_finish(self, session: "Session") -> None:
                rep.toterminal(self._tw)

    def _printcollecteditems(self, items: Sequence[Item]) -> None:
        if self.config.option.verbose < 0:
            if self.config.option.verbose < -1:
        test_cases_verbosity = self.config.get_verbosity(Config.VERBOSITY_TEST_CASES)
        if test_cases_verbosity < 0:
            if test_cases_verbosity < -1:
                counts = Counter(item.nodeid.split("::", 1)[0] for item in items)
                for name, count in sorted(counts.items()):
                    self._tw.line("%s: %d" % (name, count))
@@ -840,7 +852,7 @@ def _printcollecteditems(self, items: Sequence[Item]) -> None:
                stack.append(col)
                indent = (len(stack) - 1) * "  "
                self._tw.line(f"{indent}{col}")
                if self.config.option.verbose >= 1:
                if test_cases_verbosity >= 1:
                    obj = getattr(col, "obj", None)
                    doc = inspect.getdoc(obj) if obj else None
                    if doc:
233 changes: 233 additions & 0 deletions testing/test_terminal.py
@@ -2611,6 +2611,239 @@ def test_format_trimmed() -> None:
    assert _format_trimmed(" ({}) ", msg, len(msg) + 3) == " (unconditional ...) "


class TestFineGrainedTestCase:
    DEFAULT_FILE_CONTENTS = """
        import pytest
        @pytest.mark.parametrize("i", range(4))
        def test_ok(i):
            '''
            some docstring
            '''
            pass
        def test_fail():
            assert False
        """
    LONG_SKIP_FILE_CONTENTS = """
        import pytest
        @pytest.mark.skip(
            "some long skip reason that will not fit on a single line with other content that goes"
            " on and on and on and on and on"
        )
        def test_skip():
            pass
        """

    @pytest.mark.parametrize("verbosity", [1, 2])
    def test_execute_positive(self, verbosity, pytester: Pytester) -> None:
        # expected: one test case per line (with file name), word describing result
        p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=verbosity)
        result = pytester.runpytest(p)

        result.stdout.fnmatch_lines(
            [
                "collected 5 items",
                "",
                f"{p.name}::test_ok[0] PASSED [ 20%]",
                f"{p.name}::test_ok[1] PASSED [ 40%]",
                f"{p.name}::test_ok[2] PASSED [ 60%]",
                f"{p.name}::test_ok[3] PASSED [ 80%]",
                f"{p.name}::test_fail FAILED [100%]",
            ],
            consecutive=True,
        )

    def test_execute_0_global_1(self, pytester: Pytester) -> None:
        # expected: one file name per line, single character describing result
        p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=0)
        result = pytester.runpytest("-v", p)

        result.stdout.fnmatch_lines(
            [
                "collecting ... collected 5 items",
                "",
                f"{p.name} ....F [100%]",
            ],
            consecutive=True,
        )

    @pytest.mark.parametrize("verbosity", [-1, -2])
    def test_execute_negative(self, verbosity, pytester: Pytester) -> None:
        # expected: single character describing result
        p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=verbosity)
        result = pytester.runpytest(p)

        result.stdout.fnmatch_lines(
            [
                "collected 5 items",
                "....F [100%]",
            ],
            consecutive=True,
        )

    def test_execute_skipped_positive_2(self, pytester: Pytester) -> None:
        # expected: one test case per line (with file name), word describing result, full reason
        p = TestFineGrainedTestCase._initialize_files(
            pytester,
            verbosity=2,
            file_contents=TestFineGrainedTestCase.LONG_SKIP_FILE_CONTENTS,
        )
        result = pytester.runpytest(p)

        result.stdout.fnmatch_lines(
            [
                "collected 1 item",
                "",
                f"{p.name}::test_skip SKIPPED (some long skip",
                "reason that will not fit on a single line with other content that goes",
                "on and on and on and on and on) [100%]",
            ],
            consecutive=True,
        )

    def test_execute_skipped_positive_1(self, pytester: Pytester) -> None:
        # expected: one test case per line (with file name), word describing result, reason truncated
        p = TestFineGrainedTestCase._initialize_files(
            pytester,
            verbosity=1,
            file_contents=TestFineGrainedTestCase.LONG_SKIP_FILE_CONTENTS,
        )
        result = pytester.runpytest(p)

        result.stdout.fnmatch_lines(
            [
                "collected 1 item",
                "",
                f"{p.name}::test_skip SKIPPED (some long ski...) [100%]",
            ],
            consecutive=True,
        )

    def test_execute_skipped__0_global_1(self, pytester: Pytester) -> None:
        # expected: one file name per line, single character describing result (no reason)
        p = TestFineGrainedTestCase._initialize_files(
            pytester,
            verbosity=0,
            file_contents=TestFineGrainedTestCase.LONG_SKIP_FILE_CONTENTS,
        )
        result = pytester.runpytest("-v", p)

        result.stdout.fnmatch_lines(
            [
                "collecting ... collected 1 item",
                "",
                f"{p.name} s [100%]",
            ],
            consecutive=True,
        )

    @pytest.mark.parametrize("verbosity", [-1, -2])
    def test_execute_skipped_negative(self, verbosity, pytester: Pytester) -> None:
        # expected: single character describing result (no reason)
        p = TestFineGrainedTestCase._initialize_files(
            pytester,
            verbosity=verbosity,
            file_contents=TestFineGrainedTestCase.LONG_SKIP_FILE_CONTENTS,
        )
        result = pytester.runpytest(p)

        result.stdout.fnmatch_lines(
            [
                "collected 1 item",
                "s [100%]",
            ],
            consecutive=True,
        )

    @pytest.mark.parametrize("verbosity", [1, 2])
    def test__collect_only_positive(self, verbosity, pytester: Pytester) -> None:
        p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=verbosity)
        result = pytester.runpytest("--collect-only", p)

        result.stdout.fnmatch_lines(
            [
                "collected 5 items",
                "",
                f"<Dir {p.parent.name}>",
                f" <Module {p.name}>",
                " <Function test_ok[0]>",
                " some docstring",
                " <Function test_ok[1]>",
                " some docstring",
                " <Function test_ok[2]>",
                " some docstring",
                " <Function test_ok[3]>",
                " some docstring",
                " <Function test_fail>",
            ],
            consecutive=True,
        )

    def test_collect_only_0_global_1(self, pytester: Pytester) -> None:
        p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=0)
        result = pytester.runpytest("-v", "--collect-only", p)

        result.stdout.fnmatch_lines(
            [
                "collecting ... collected 5 items",
                "",
                f"<Dir {p.parent.name}>",
                f" <Module {p.name}>",
                " <Function test_ok[0]>",
                " <Function test_ok[1]>",
                " <Function test_ok[2]>",
                " <Function test_ok[3]>",
                " <Function test_fail>",
            ],
            consecutive=True,
        )

    def test_collect_only_negative_1(self, pytester: Pytester) -> None:
        p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=-1)
        result = pytester.runpytest("--collect-only", p)

        result.stdout.fnmatch_lines(
            [
                "collected 5 items",
                "",
                f"{p.name}::test_ok[0]",
                f"{p.name}::test_ok[1]",
                f"{p.name}::test_ok[2]",
                f"{p.name}::test_ok[3]",
                f"{p.name}::test_fail",
            ],
            consecutive=True,
        )

    def test_collect_only_negative_2(self, pytester: Pytester) -> None:
        p = TestFineGrainedTestCase._initialize_files(pytester, verbosity=-2)
        result = pytester.runpytest("--collect-only", p)

        result.stdout.fnmatch_lines(
            [
                "collected 5 items",
                "",
                f"{p.name}: 5",
            ],
            consecutive=True,
        )

    @staticmethod
    def _initialize_files(
        pytester: Pytester, verbosity: int, file_contents: str = DEFAULT_FILE_CONTENTS
    ) -> Path:
        p = pytester.makepyfile(file_contents)
        pytester.makeini(
            f"""
            [pytest]
            verbosity_test_cases = {verbosity}
            """
        )
        return p


def test_summary_xfail_reason(pytester: Pytester) -> None:
    pytester.makepyfile(
        """
