diff --git a/doc/develop/test/pytest.rst b/doc/develop/test/pytest.rst
index e644882191ef7d..f3db6fcee89c6a 100644
--- a/doc/develop/test/pytest.rst
+++ b/doc/develop/test/pytest.rst
@@ -56,6 +56,16 @@ Pytest scans the given locations looking for tests, following its default
 `discovery rules `_
 One can also pass some extra arguments to the pytest from yaml file using ``pytest_args`` keyword
 under ``harness_config``, e.g.: ``pytest_args: ['-k=test_method', '--log-level=DEBUG']``.
 
+There is also an option to pass ``--pytest-args`` through the Twister command line.
+This can be particularly useful when one wants to select a specific testcase from a test suite.
+For instance, one can use the following command:
+
+.. code-block:: console
+
+   $ ./scripts/twister --platform native_sim -T samples/subsys/testsuite/pytest/shell \
+      -s samples/subsys/testsuite/pytest/shell/sample.pytest.shell \
+      --pytest-args='-k test_shell_print_version'
+
 Helpers & fixtures
 ==================
diff --git a/scripts/pylib/twister/twisterlib/environment.py b/scripts/pylib/twister/twisterlib/environment.py
index 86965f1f9cfbad..b7e11406cd4d5d 100644
--- a/scripts/pylib/twister/twisterlib/environment.py
+++ b/scripts/pylib/twister/twisterlib/environment.py
@@ -216,6 +216,11 @@ def add_parse_arguments(parser = None):
         and 'fifo_loop' is a name of a function found in main.c without test prefix.
         """)
 
+    parser.add_argument("--pytest-args",
+        help="""Pass additional arguments to the pytest subprocess. This parameter
+        will override the pytest_args from the harness_config in the YAML file.
+        """)
+
     valgrind_asan_group.add_argument(
         "--enable-valgrind", action="store_true",
         help="""Run binary through valgrind and check for several memory access
diff --git a/scripts/pylib/twister/twisterlib/harness.py b/scripts/pylib/twister/twisterlib/harness.py
index 052def7162a812..dece1673c7a9ec 100644
--- a/scripts/pylib/twister/twisterlib/harness.py
+++ b/scripts/pylib/twister/twisterlib/harness.py
@@ -309,8 +309,9 @@ def pytest_run(self, timeout):
 
     def generate_command(self):
         config = self.instance.testsuite.harness_config
+        handler: Handler = self.instance.handler
         pytest_root = config.get('pytest_root', ['pytest']) if config else ['pytest']
-        pytest_args = config.get('pytest_args', []) if config else []
+        pytest_args_yaml = config.get('pytest_args', []) if config else []
         pytest_dut_scope = config.get('pytest_dut_scope', None) if config else None
         command = [
             'pytest',
@@ -324,12 +325,19 @@ def generate_command(self):
         ]
         command.extend([os.path.normpath(os.path.join(
             self.source_dir, os.path.expanduser(os.path.expandvars(src)))) for src in pytest_root])
-        command.extend(pytest_args)
+
+        if handler.options.pytest_args:
+            command.append(handler.options.pytest_args)
+            if pytest_args_yaml:
+                logger.warning(f'The pytest_args ({handler.options.pytest_args}) specified '
+                               'in the command line will override the pytest_args defined '
+                               f'in the YAML file {pytest_args_yaml}')
+        else:
+            command.extend(pytest_args_yaml)
+
         if pytest_dut_scope:
             command.append(f'--dut-scope={pytest_dut_scope}')
 
-        handler: Handler = self.instance.handler
-
         if handler.options.verbose > 1:
             command.extend([
                 '--log-cli-level=DEBUG',
@@ -489,6 +497,9 @@
                         tc.status = 'error'
                         tc.reason = elem.get('message')
                         tc.output = elem.text
+        else:
+            self.state = 'skipped'
+            self.instance.reason = 'No tests collected'
 
 
 class Gtest(Harness):
diff --git a/scripts/tests/twister/pytest_integration/test_harness_pytest.py b/scripts/tests/twister/pytest_integration/test_harness_pytest.py
index 150980059b3d7d..fc60b99e0d13ee 100644
--- a/scripts/tests/twister/pytest_integration/test_harness_pytest.py
+++ b/scripts/tests/twister/pytest_integration/test_harness_pytest.py
@@ -25,6 +25,7 @@ def testinstance() -> TestInstance:
     testinstance.handler = mock.Mock()
     testinstance.handler.options = mock.Mock()
     testinstance.handler.options.verbose = 1
+    testinstance.handler.options.pytest_args = None
     testinstance.handler.type_str = 'native'
     return testinstance
 
@@ -67,6 +68,18 @@ def test_pytest_command_extra_args(testinstance: TestInstance):
         assert c in command
 
 
+def test_pytest_command_extra_args_in_options(testinstance: TestInstance):
+    pytest_harness = Pytest()
+    pytest_args_from_yaml = '-k test_from_yaml'
+    pytest_args_from_cmd = '-k test_from_cmd'
+    testinstance.testsuite.harness_config['pytest_args'] = [pytest_args_from_yaml]
+    testinstance.handler.options.pytest_args = pytest_args_from_cmd
+    pytest_harness.configure(testinstance)
+    command = pytest_harness.generate_command()
+    assert pytest_args_from_cmd in command
+    assert pytest_args_from_yaml not in command
+
+
 @pytest.mark.parametrize(
     ('pytest_root', 'expected'),
     [
@@ -222,3 +235,56 @@
     assert len(testinstance.testcases) == 2
     for tc in testinstance.testcases:
         assert tc.status == "skipped"
+
+
+def test_if_report_with_filter(pytester, testinstance: TestInstance):
+    test_file_content = textwrap.dedent("""
+        import pytest
+        def test_A():
+            pass
+        def test_B():
+            pass
+    """)
+    test_file = pytester.path / 'test_filter.py'
+    test_file.write_text(test_file_content)
+    report_file = pytester.path / 'report.xml'
+    result = pytester.runpytest(
+        str(test_file),
+        '-k', 'test_B',
+        f'--junit-xml={str(report_file)}'
+    )
+    result.assert_outcomes(passed=1)
+    assert report_file.is_file()
+
+    pytest_harness = Pytest()
+    pytest_harness.configure(testinstance)
+    pytest_harness.report_file = report_file
+    pytest_harness._update_test_status()
+    assert pytest_harness.state == "passed"
+    assert testinstance.status == "passed"
+    assert len(testinstance.testcases) == 1
+
+
+def test_if_report_with_no_collected(pytester, testinstance: TestInstance):
+    test_file_content = textwrap.dedent("""
+        import pytest
+        def test_A():
+            pass
+    """)
+    test_file = pytester.path / 'test_filter.py'
+    test_file.write_text(test_file_content)
+    report_file = pytester.path / 'report.xml'
+    result = pytester.runpytest(
+        str(test_file),
+        '-k', 'test_B',
+        f'--junit-xml={str(report_file)}'
+    )
+    result.assert_outcomes(passed=0)
+    assert report_file.is_file()
+
+    pytest_harness = Pytest()
+    pytest_harness.configure(testinstance)
+    pytest_harness.report_file = report_file
+    pytest_harness._update_test_status()
+    assert pytest_harness.state == "skipped"
+    assert testinstance.status == "skipped"
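For context, a minimal sketch of where ``pytest_args`` is normally defined in a suite's YAML
file under ``harness_config`` (the test name and paths below are hypothetical, invented for
illustration only):

    # testcase.yaml -- hypothetical sketch, not an actual Zephyr sample
    tests:
      sample.pytest.example:
        harness: pytest
        harness_config:
          pytest_root:
            - "pytest"
          pytest_args:
            - "-k test_from_yaml"

With this change, a value passed via ``--pytest-args`` on the Twister command line takes
precedence over such a list and a warning is logged; when the option is not given, the
``pytest_args`` from ``harness_config`` are used unchanged, matching the ``else`` branch
in ``generate_command`` above.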