diff --git a/Makefile b/Makefile
index 7b1d1f9b7..e86b4a775 100644
--- a/Makefile
+++ b/Makefile
@@ -241,8 +241,7 @@ pytest_internal_parallel:
 # --global-coverage-infos-json=global-coverage-infos.json is to dump the coverage report in the file
 # --cov PATH is the directory PATH to consider for coverage. Default to SRC_DIR=src
 # --cov-fail-under=100 is to make the command fail if coverage does not reach a 100%
-# --cov-report=term-missing:skip-covered is used to print the missing lines for coverage withtout
-# taking into account skiped tests
+# --cov-report=term-missing:skip-covered is used to avoid printing covered lines for all files
 .PHONY: pytest # Run pytest on all tests
 pytest:
 	"$(MAKE)" pytest_internal_parallel \
@@ -279,6 +278,8 @@ pytest_no_flaky: check_current_flaky_tests
 # --cov PATH is the directory PATH to consider for coverage. Default to SRC_DIR=src
 # --cov-append is to make the coverage of the previous pytest run to also consider the tests that are
 # going to be re-executed by 'pytest_run_last_failed'
+# --cov-fail-under=100 is to make the command fail if coverage does not reach a 100%
+# --cov-report=term-missing:skip-covered is used to avoid printing covered lines for all files
 # --global-coverage-infos-json=global-coverage-infos.json is to dump the coverage report in the file
 # --last-failed runs all last failed tests
 # --last-failed-no-failures none' indicates pytest not to run anything (instead of running
@@ -288,6 +289,8 @@ pytest_run_last_failed:
 	poetry run pytest $(TEST) \
 		--cov=$(SRC_DIR) \
 		--cov-append \
+		--cov-fail-under=100 \
+		--cov-report=term-missing:skip-covered \
 		--global-coverage-infos-json=global-coverage-infos.json \
 		--last-failed \
 		--last-failed-no-failures none
diff --git a/script/actions_utils/pytest_failed_test_report.py b/script/actions_utils/pytest_failed_test_report.py
index 3d46ef5e0..7483e3902 100755
--- a/script/actions_utils/pytest_failed_test_report.py
+++ b/script/actions_utils/pytest_failed_test_report.py
@@ -98,8 +98,14 @@ def write_failed_tests_report(
         else:
             failed_tests_report["non_flaky"].append(test_name)  # type: ignore[attr-defined]
 
-    # If no non-flaky tests failed, report that all failed tests were known flaky tests
-    if not failed_tests_report["non_flaky"]:
+    # If there are some flaky tests but no non-flaky tests failed, report that all failed tests
+    # were known flaky tests
+    # We need to make sure that at least one flaky test has been detected for one specific
+    # reason: if, for example, a test file has a syntax error, pytest will "crash" and therefore
+    # won't collect any tests in the file. The problem is that this will return an 'exitcode'
+    # of 1, making this script unexpectedly return 'all_failed_tests_are_flaky=True' in the
+    # case where 'failed_tests_report["non_flaky"]' is empty
+    if failed_tests_report["flaky"] and not failed_tests_report["non_flaky"]:
         failed_tests_report["all_failed_tests_are_flaky"] = True
     else:
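
For reference, here is a minimal standalone sketch of the behavior the Python change introduces. It is not the project's actual script: the helper name `summarize_failures` and its inputs are hypothetical, and only the report's dict shape is taken from the diff. It illustrates that `all_failed_tests_are_flaky` is now set only when at least one flaky failure was actually detected, so a run where pytest exits non-zero without collecting any tests (for example, a syntax error in a test file) no longer gets reported as "all failed tests are flaky".

```python
# Hypothetical, self-contained sketch of the updated check; not the real
# pytest_failed_test_report.py implementation.
from typing import Dict, List, Union


def summarize_failures(
    flaky: List[str], non_flaky: List[str]
) -> Dict[str, Union[bool, List[str]]]:
    """Build a small report mimicking the dict shape used in the diff above."""
    report: Dict[str, Union[bool, List[str]]] = {
        "flaky": flaky,
        "non_flaky": non_flaky,
        "all_failed_tests_are_flaky": False,
    }

    # Updated condition: require at least one detected flaky failure, so a crashed
    # run (nothing collected, both lists empty) is not reported as "all flaky"
    if report["flaky"] and not report["non_flaky"]:
        report["all_failed_tests_are_flaky"] = True

    return report


if __name__ == "__main__":
    # Only known-flaky failures: the flag is set
    assert summarize_failures(["test_a"], [])["all_failed_tests_are_flaky"] is True
    # Collection crash, nothing detected at all: the flag is no longer set
    assert summarize_failures([], [])["all_failed_tests_are_flaky"] is False
    # A genuine non-flaky failure is present: the flag stays unset
    assert summarize_failures(["test_a"], ["test_b"])["all_failed_tests_are_flaky"] is False
```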