diff --git a/.github/actions/run_opencl_cts/action.yml b/.github/actions/run_opencl_cts/action.yml
index 396bab891..41780cfc1 100644
--- a/.github/actions/run_opencl_cts/action.yml
+++ b/.github/actions/run_opencl_cts/action.yml
@@ -34,11 +34,16 @@ runs:
         fi
         echo QEMU SETTING: $QEMU_SETTING
         set -x
-        echo > expect_fail.csv
+        # Build the override file: the 'all' csv first, then the target-specific one.
+        # The last file can overwrite previous overrides.
+        for csv in .github/opencl_cts/override_all.csv .github/opencl_cts/override_${{ inputs.target }}.csv
+        do
+          [ -f "$csv" ] && cat "$csv" || true
+        done > override.csv
+        echo override file:
+        cat override.csv
+        # $CTS_FILTER ignores certain tests, so it is treated differently from temporary failures.
-        [ -f .github/opencl_cts/expect_fail_all.csv ] && cat .github/opencl_cts/expect_fail_all.csv >> expect_fail.csv
-        [ -f .github/opencl_cts/expect_fail_${{ inputs.target }}.csv ] && cat .github/opencl_cts/expect_fail_${{ inputs.target }}.csv >> expect_fail.csv
-        cat expect_fail.csv "$GITHUB_WORKSPACE/source/cl/scripts/$CTS_FILTER" > disable.csv
         # Note: use 'eval' built-in to handle quoting/escaping/splitting reqs
         RUN_CITIES="python3 -u $GITHUB_WORKSPACE/scripts/testing/run_cities.py -v \
           --color=always --timeout $CTS_TIMEOUT \
@@ -49,23 +54,6 @@ runs:
           -e OCL_ICD_FILENAMES=$GITHUB_WORKSPACE/install_ock/lib/libCL.so \
           -e CL_PLATFORM_INDEX=0 \
           -s $GITHUB_WORKSPACE/test_conformance/$CTS_CSV_FILE \
-          -i disable.csv"
+          -i $GITHUB_WORKSPACE/source/cl/scripts/$CTS_FILTER \
+          -o override.csv"
         eval $RUN_CITIES
-
-  - name: Run expected failed opencl cts
-    shell: bash
-    env:
-      CTS_TIMEOUT: 18:00:00
-    run: |
-      echo "Running OpenCL CTS tests with CTS file $CTS_CSV_FILE with filter $CTS_FILTER"
-      set -x
-      RUN_CITIES="python3 -u $GITHUB_WORKSPACE/scripts/testing/run_cities.py -v \
-        --color=always --timeout $CTS_TIMEOUT \
-        $QEMU_SETTING \
-        -b $GITHUB_WORKSPACE/test_conformance \
-        -L $GITHUB_WORKSPACE/install_icd/lib \
-        -e CLC_EXECUTABLE=$GITHUB_WORKSPACE/install_ock/bin/clc \
-        -e OCL_ICD_FILENAMES=$GITHUB_WORKSPACE/install_ock/lib/libCL.so \
-        -e CL_PLATFORM_INDEX=0 \
-        -s expect_fail.csv"
-      eval $RUN_CITIES || echo failed as expected
diff --git a/.github/actions/run_sycl_cts/action.yml b/.github/actions/run_sycl_cts/action.yml
index 780cf221b..26967740e 100644
--- a/.github/actions/run_sycl_cts/action.yml
+++ b/.github/actions/run_sycl_cts/action.yml
@@ -67,12 +67,18 @@ runs:
         export LD_LIBRARY_PATH=$GITHUB_WORKSPACE/install_dpcpp/lib:$GITHUB_WORKSPACE/install_ock/lib
         export ONEAPI_DEVICE_SELECTOR=opencl:0
         export CTS_CSV_FILE=$GITHUB_WORKSPACE/.github/scripts/sycl-cts.csv
-        echo > expect_fail.csv
         # $CTS_FILTER ignores certain test, so is treated differently to temporary fails.
-        [ -f .github/sycl_cts/expect_fail_all.csv ] && cat .github/sycl_cts/expect_fail_all.csv >> expect_fail.csv
-        [ -f .github/sycl_cts/expect_fail_${{ inputs.target }}.csv ] && cat .github/sycl_cts/expect_fail_${{ inputs.target }}.csv >> expect_fail.csv
-        cp expect_fail.csv disable.csv
-        [ -f "$SYCL_CTS_FILTER" ] && cat "$SYCL_CTS_FILTER" >> disable.csv
+
+        # Build the override file: the 'all' csv first, then the target-specific one. The last file can overwrite previous overrides.
+        for csv in .github/sycl_cts/override_all.csv .github/sycl_cts/override_${{ inputs.target }}.csv
+        do
+          [ -f "$csv" ] && cat "$csv" || true
+        done > override.csv
+
+        echo override file:
+        cat override.csv
+
         python3 $GITHUB_WORKSPACE/scripts/testing/run_cities.py \
           --color=always \
           --timeout $SYCL_CTS_TIMEOUT \
@@ -85,30 +91,11 @@ runs:
           -l SYCL-CTS/cts.log -f SYCL-CTS/cts.fail \
           -r SYCL-CTS/cts.xml \
           -v \
-          -i disable.csv || exitcode=$?
+          -o override.csv \
+          $SYCL_CTS_FILTER || exitcode=$?
+
         export OCL_ICD_FILENAMES=$GITHUB_WORKSPACE/install_ock/lib/libCL.so
         $GITHUB_WORKSPACE/.github/scripts/create_sycl_cts_test_lists.sh $PREPEND_PATH SYCL-CTS $CTS_CSV_FILE csv.txt cts_all.txt
         # output a diff of the generated list csv.txt and cts_all.txt
         diff csv.txt cts_all.txt || echo "WARNING - Missing some tests from sycl cts file based on test_all --list-tests - see > above"
         exit $exitcode
-
-    - name: run sycl cts expected fails
-      shell: bash
-      env:
-        PREPEND_PATH: '' # TODO: have qemu as input and set up this
-        SYCL_CTS_TIMEOUT: '02:00:00'
-      run: |
-        echo running sycl cts
-        export LD_LIBRARY_PATH=$GITHUB_WORKSPACE/install_dpcpp/lib:$GITHUB_WORKSPACE/install_ock/lib
-        export ONEAPI_DEVICE_SELECTOR=opencl:0
-        python3 $GITHUB_WORKSPACE/scripts/testing/run_cities.py \
-          --color=always \
-          --timeout $SYCL_CTS_TIMEOUT \
-          $PREPEND_PATH \
-          -p sycl_cts \
-          -b SYCL-CTS/bin \
-          -L SYCL-CTS/lib \
-          -e OCL_ICD_FILENAMES=$GITHUB_WORKSPACE/install_ock/lib/libCL.so \
-          -l SYCL-CTS/cts.log -f SYCL-CTS/cts.fail \
-          -r SYCL-CTS/cts.xml \
-          -s expect_fail.csv || echo failed as expected
diff --git a/.github/opencl_cts/expect_fail_all.csv b/.github/opencl_cts/expect_fail_all.csv
deleted file mode 100644
index b2098fddd..000000000
--- a/.github/opencl_cts/expect_fail_all.csv
+++ /dev/null
@@ -1 +0,0 @@
-API,api/test_api
diff --git a/.github/opencl_cts/expect_fail_host_riscv64_linux.csv b/.github/opencl_cts/expect_fail_host_riscv64_linux.csv
deleted file mode 100644
index 7c78bc659..000000000
--- a/.github/opencl_cts/expect_fail_host_riscv64_linux.csv
+++ /dev/null
@@ -1 +0,0 @@
-Math,math_brute_force/test_bruteforce -w
diff --git a/.github/opencl_cts/override_all.csv b/.github/opencl_cts/override_all.csv
new file mode 100644
index 000000000..51566cdca
--- /dev/null
+++ b/.github/opencl_cts/override_all.csv
@@ -0,0 +1 @@
+API,api/test_api,Xfail
diff --git a/.github/opencl_cts/override_host_riscv64_linux.csv b/.github/opencl_cts/override_host_riscv64_linux.csv
new file mode 100644
index 000000000..6b8c9e266
--- /dev/null
+++ b/.github/opencl_cts/override_host_riscv64_linux.csv
@@ -0,0 +1 @@
+Math,math_brute_force/test_bruteforce -w,Xfail
diff --git a/.github/sycl_cts/expect_fail_all.csv b/.github/sycl_cts/expect_fail_all.csv
deleted file mode 100644
index d5f5bd439..000000000
--- a/.github/sycl_cts/expect_fail_all.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-SYCL_CTS,test_math_builtin_api "math_builtin_float_base_*"
-SYCL_CTS,test_math_builtin_api "math_builtin_float_double_*"
diff --git a/.github/sycl_cts/override_all.csv b/.github/sycl_cts/override_all.csv
new file mode 100644
index 000000000..612f05741
--- /dev/null
+++ b/.github/sycl_cts/override_all.csv
@@ -0,0 +1,3 @@
+SYCL_CTS,test_math_builtin_api "math_builtin_float_base_*",Xfail
+SYCL_CTS,test_math_builtin_api "math_builtin_float_double_*",Xfail
+SYCL_CTS,test_event "event::wait does not report asynchronous errors",Mayfail
diff --git a/.github/workflows/planned_testing_caller.yml b/.github/workflows/planned_testing_caller.yml
index 2e7903e01..120aa45bf 100644
--- a/.github/workflows/planned_testing_caller.yml
+++ b/.github/workflows/planned_testing_caller.yml
@@ -23,12 +23,14 @@ jobs:
     if: github.repository == 'uxlfoundation/oneapi-construction-kit' || github.event_name != 'schedule'
     uses: ./.github/workflows/planned_testing.yml
     with:
-      target_list: '[ "host_x86_64_linux",
-                      "host_aarch64_linux",
-                      "host_riscv64_linux",
-                      "host_i686_linux",
-                      "host_refsi_linux",
-                      "host_x86_64_windows" ]'
+      # target_list: '[ "host_x86_64_linux",
+      #                 "host_aarch64_linux",
+      #                 "host_riscv64_linux",
+      #                 "host_i686_linux",
+      #                 "host_refsi_linux",
+      #                 "host_x86_64_windows" ]'
+      target_list: '[ "host_x86_64_linux" ]'
       ock: true
       test_tornado: true
      test_sycl_cts: true
@@ -37,7 +39,7 @@ jobs:
      # in a reasonable time

      # The following can be used to download from a previous workflow run (change id)
-      # download_ock_artefact: host_x86_64_linux=12915462445;host_aarch64_linux=12915462445
-      # download_dpcpp_artefact: host_x86_64_linux=12915462445;host_aarch64_linux=12915462445
-      # download_sycl_cts_artefact: host_x86_64_linux=12915462445;host_aarch64_linux=12915462445
+      download_ock_artefact: host_x86_64_linux=13442977877;host_aarch64_linux=13442977877
+      download_dpcpp_artefact: host_x86_64_linux=13442977877;host_aarch64_linux=13442977877
+      download_sycl_cts_artefact: host_x86_64_linux=13442977877;host_aarch64_linux=13442977877
       pull_request: ${{ github.event_name == 'pull_request' }}
diff --git a/doc/scripts/city_runner.rst b/doc/scripts/city_runner.rst
index d95b5d7ec..5414dbe70 100644
--- a/doc/scripts/city_runner.rst
+++ b/doc/scripts/city_runner.rst
@@ -612,6 +612,16 @@ Other Options
    test list. Tests marked as ``Ignored`` are not run and not counted.
    This is only supported for the CTS and GTest profiles at the moment.

+``--override-source``, ``-o``
+   A CSV file containing a list of tests which override matching entries in
+   the test CSV provided by ``-s``. An entry is overridden when it matches
+   the first two values of a line in the override file. This is useful for
+   updating the attribute or pool values (the optional 3rd and 4th values);
+   for example, a test that is expected to fail can be given the ``Xfail``
+   attribute.
+
+   Note that overrides are applied in order, so if more than one line in the
+   override file matches an entry in the test CSV file, the last one wins.
+
 CSV File Format
 ---------------

@@ -654,6 +664,16 @@ Unimplemented
    associated with tests which have been left unimplemented by the CTS, or
    test things that aren't in CL 1.x.

+Xfail
+   Tests marked with the ``Xfail`` attribute are run and expected to fail.
+   Failures are counted in the pass rate and reported under the number of
+   expected fails. An ``Xfail`` test only counts towards a failing exit code
+   if it unexpectedly passes.
+Mayfail
+   Tests marked with the ``Mayfail`` attribute are run and allowed to fail.
+   This should usually be reserved for tests which fail intermittently.
+   Failures are counted in the pass rate and reported under the may-fail
+   count, but do not count towards a failing exit code.
+
 .. _pools:

 Pools
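The override semantics documented above are compact enough to sketch in isolation. The following is a minimal illustration only, not the city_runner implementation; `apply_overrides` and the inline sample data are invented for this example:

```python
import csv
import io

def apply_overrides(test_rows, override_rows):
    """Replace a test row with an override row when the first two
    columns match; the last matching override wins."""
    result = []
    for row in test_rows:
        for override in override_rows:
            if row[:2] == override[:2]:
                row = override  # a later match overwrites an earlier one
        result.append(row)
    return result

# Sample data mirroring the CSV files added in this change.
tests = list(csv.reader(io.StringIO(
    'API,api/test_api\n'
    'Math,math_brute_force/test_bruteforce -w\n')))
overrides = list(csv.reader(io.StringIO('API,api/test_api,Xfail\n')))

print(apply_overrides(tests, overrides))
# [['API', 'api/test_api', 'Xfail'],
#  ['Math', 'math_brute_force/test_bruteforce -w']]
```

The attribute supplied in the third column is then interpreted exactly as if it had appeared in the test CSV itself.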
diff --git a/scripts/testing/city_runner/profile.py b/scripts/testing/city_runner/profile.py
index 8dbe109dc..374c81b7e 100644
--- a/scripts/testing/city_runner/profile.py
+++ b/scripts/testing/city_runner/profile.py
@@ -121,6 +121,11 @@ def add_options(self, parser):
             "--ignored-source",
             default="",
             help="File containing a list of ignored tests to skip.")
+        parser.add_argument(
+            "-o",
+            "--override-source",
+            default="",
+            help="File containing a list of tests that override entries in "
+                 "the test list when the first 2 columns match, in order.")
         parser.add_argument(
             "-l", "--log-file", type=str, help="File to log test output to")
         parser.add_argument(
@@ -225,7 +230,7 @@ def create_run(self, test, worker_state=None):
         """ Create a new test run from a test description. """
         raise NotImplementedError()

-    def load_tests(self, csv_paths, disabled_path, ignored_path):
+    def load_tests(self, csv_paths, disabled_path, ignored_path, override_path):
         """ Create the list of tests to run from a CSV. """
         if not csv_paths or any(not csv_path or not os.path.exists(csv_path) for csv_path in csv_paths):
             raise Exception("Test list file not specified or does not exist")
@@ -233,8 +238,10 @@ def load_tests(self, csv_paths, disabled_path, ignored_path):
             raise Exception("Disabled test list file does not exist")
         if ignored_path and not os.path.exists(ignored_path):
             raise Exception("Ignored test list file does not exist")
+        if override_path and not os.path.exists(override_path):
+            raise Exception("Override test list file does not exist")
         tests = (TestList
-                 .from_file(csv_paths, disabled_path, ignored_path, self.args.test_prefix)
+                 .from_file(csv_paths, disabled_path, ignored_path, override_path, self.args.test_prefix)
                  .filter(self.args.patterns))
         return tests
diff --git a/scripts/testing/city_runner/profiles/basic.py b/scripts/testing/city_runner/profiles/basic.py
index 036cdad61..53efc7084 100644
--- a/scripts/testing/city_runner/profiles/basic.py
+++ b/scripts/testing/city_runner/profiles/basic.py
@@ -100,7 +100,7 @@ def build_environment_vars(self):

         return env

-    def load_tests(self, csv_paths, disabled_path, ignored_path):
+    def load_tests(self, csv_paths, disabled_path, ignored_path, override_path):
         """ Find the list of tests from CSV. """
         if disabled_path:
             print("Warning: disabled list not supported for basic profile")
@@ -108,6 +108,9 @@ def load_tests(self, csv_paths, disabled_path, ignored_path):
         if ignored_path:
             print("Warning: ignored list not supported for basic profile")

+        if override_path:
+            print("Warning: override list not supported for basic profile")
+
         parsed_tests = []
         # Load tests from CSV if any were provided
         if csv_paths:
diff --git a/scripts/testing/city_runner/profiles/gtest.py b/scripts/testing/city_runner/profiles/gtest.py
index 7da47b798..3eae73e59 100644
--- a/scripts/testing/city_runner/profiles/gtest.py
+++ b/scripts/testing/city_runner/profiles/gtest.py
@@ -195,10 +195,12 @@ def parse_gtest_csv(self, csv_file):

         return test_names

-    def load_tests(self, csv_paths, disabled_path, ignored_path):
+    def load_tests(self, csv_paths, disabled_path, ignored_path, override_path):
         """ Find the list of tests from CSV or fallback to gtest binary.
""" if disabled_path: print("Warning: disabled list not supported for gtest profile") + if override_path: + print("Warning: override list not supported for gtest profile") executable = TestExecutable(self.args.binary_name, self.args.binary_name) diff --git a/scripts/testing/city_runner/profiles/tensorflow.py b/scripts/testing/city_runner/profiles/tensorflow.py index ba288200e..3d0e29375 100644 --- a/scripts/testing/city_runner/profiles/tensorflow.py +++ b/scripts/testing/city_runner/profiles/tensorflow.py @@ -118,7 +118,7 @@ def parse_options(self, argv): return args - def load_tests(self, csv_paths, disabled_path, ignored_path): + def load_tests(self, csv_paths, disabled_path, ignored_path, override_path): """ Find the list of tests from CSV or fallback to Tensorflow binary. """ @@ -128,6 +128,9 @@ def load_tests(self, csv_paths, disabled_path, ignored_path): if ignored_path: print("Warning: ignored list not supported for tensorflow profile") + if override_path: + print("Warning: override list not supported for tensorflow profile") + # Find path to Tensorflow executable Tensorflow_exe_path = os.path.abspath(self.args.binary_path) Tensorflow_dir = os.path.dirname(Tensorflow_exe_path) diff --git a/scripts/testing/city_runner/runner.py b/scripts/testing/city_runner/runner.py index 1d80ffc59..fd603802d 100644 --- a/scripts/testing/city_runner/runner.py +++ b/scripts/testing/city_runner/runner.py @@ -121,9 +121,11 @@ def execute(self): test_source = self.args.test_source # CSV disabled_source = self.args.disabled_source # Disabled CSV ignored_source = self.args.ignored_source # Ignored CSV + override_source = self.args.override_source # Override CSV self.tests = self.profile.load_tests(test_source, disabled_source, - ignored_source) + ignored_source, + override_source) if self.args.repeat > 1: self.tests *= self.args.repeat self.num_tests = len(self.tests) @@ -260,8 +262,11 @@ def process_results(self): # Return the city runner exit code. if self.aborted: return 130 - if self.results.num_fails and not self.args.relaxed: - return 1 + if not self.args.relaxed: + if self.results.num_fails > 0: + return 1 + if self.results.num_xfail_unexpectedly_passed > 0: + return 1 return 0 def process_output(self, run): diff --git a/scripts/testing/city_runner/test_info.py b/scripts/testing/city_runner/test_info.py index 42ab6c21a..b550625cd 100644 --- a/scripts/testing/city_runner/test_info.py +++ b/scripts/testing/city_runner/test_info.py @@ -103,7 +103,7 @@ def __mul__(self, x): return TestList(new_tests, self.executables) @classmethod - def from_file(cls, list_file_paths, disabled_file_path, ignored_file_path, prefix=""): + def from_file(cls, list_file_paths, disabled_file_path, ignored_file_path, override_file_path, prefix=""): """ Load a list of tests from a CTS list file. 
""" tests = [] disabled_tests = [] @@ -125,7 +125,15 @@ def from_file(cls, list_file_paths, disabled_file_path, ignored_file_path, prefi line for line in stripped if line and not line.startswith("#")) chunked = csv.reader(filtered) filter_tests.extend(json.dumps(chunks) for chunks in chunked) - + # override_matches + override_tests = [] + if override_file_path: + with open(override_file_path, "r") as f: + stripped = (line.strip() for line in f) + filtered = ( + line for line in stripped if line and not line.startswith("#")) + chunked = csv.reader(filtered) + override_tests.extend(chunks for chunks in chunked) for list_file_path in list_file_paths: with open(list_file_path, "r") as f: stripped = (line.strip() for line in f) @@ -138,11 +146,20 @@ def from_file(cls, list_file_paths, disabled_file_path, ignored_file_path, prefi device_filter = chunks.pop(0) if len(chunks) < 2: raise Exception("Not enough columns in the CSV file") + + # match on the first 2 + for o in override_tests: + if chunks[:2] == o[:2]: + chunks = o argv = chunks[1].strip() serialized = json.dumps(chunks) ignored = serialized in ignored_tests disabled = serialized in disabled_tests unimplemented = False + xfail = False + mayfail = False + + pool = Pool.NORMAL if len(chunks) >= 4: @@ -156,12 +173,16 @@ def from_file(cls, list_file_paths, disabled_file_path, ignored_file_path, prefi if len(chunks) >= 3: attribute = chunks[2] - if attribute == 'Ignore': + if attribute.casefold() == 'Ignore'.casefold(): ignored = True - elif attribute == 'Disabled': + elif attribute.casefold() == 'Disabled'.casefold(): disabled = True - elif attribute == 'Unimplemented': + elif attribute.casefold() == 'Unimplemented'.casefold(): unimplemented = True + elif attribute.casefold() == 'Xfail'.casefold(): + xfail = True + elif attribute.casefold() == 'Mayfail'.casefold(): + mayfail = True elif attribute: raise Exception( "Unknown attribute '%s'" % attribute) @@ -204,6 +225,8 @@ def from_file(cls, list_file_paths, disabled_file_path, ignored_file_path, prefi test.ignore = ignored test.disabled = disabled test.unimplemented = unimplemented + test.xfail = xfail + test.mayfail = mayfail # Tests with predetermined pools based on resource usage if test.match("allocations") or test.match("integer_ops"): @@ -265,6 +288,9 @@ def __init__(self, tests): self.num_tests = len(tests) self.num_passes = 0 self.num_fails = 0 + self.num_xfail_unexpectedly_passed = 0 + self.num_xfail_expected_fails = 0 + self.num_mayfail_fails = 0 self.num_skipped = 0 self.num_timeouts = 0 self.num_passes_cts = 0 @@ -290,14 +316,30 @@ def add_run(self, run): """ Add a test run to the list of results. 
""" self.runs[run.test.name] = run run.num = len(self.runs) - if run.status == "PASS": - self.num_passes += 1 - elif run.status == "SKIP": + + if run.status == "SKIP": self.num_skipped += 1 elif run.status == "TIMEOUT": self.num_timeouts += 1 - else: + + if run.test.xfail: + if run.status == "PASS": + self.num_xfail_unexpectedly_passed += 1 + run.status="XFAIL_UNEXPECTEDLY_PASSED" + elif run.status == "FAIL": + self.num_xfail_expected_fails += 1 + run.status="XFAIL_EXPECTEDLY_FAILED" + elif run.test.mayfail: + if run.status == "FAIL": + self.num_mayfail_fails += 1 + run.status="MAYFAIL_FAILED" + elif run.status == "PASS": + self.num_passes += 1 + elif run.status == "PASS": + self.num_passes += 1 + elif run.status == "FAIL": self.num_fails += 1 + if run.total_tests is not None: self.num_total_cts += run.total_tests if run.passed_tests is not None: @@ -317,6 +359,9 @@ def finish(self, profile): not_runs = [] self.fail_list = [] self.timeout_list = [] + self.xfail_unexpectedly_passed_list = [] + self.may_fail_failed_list = [] + for test in self.tests: try: run = self.runs[test.name] @@ -327,6 +372,10 @@ def finish(self, profile): self.fail_list.append(run) elif run.status == "TIMEOUT": self.timeout_list.append(run) + elif run.status == "XFAIL_UNEXPECTEDLY_PASSED": + self.xfail_unexpectedly_passed_list.append(run) + elif run.status == "MAYFAIL_FAILED": + self.may_fail_failed_list.append(run) for test in not_runs: run = profile.create_run(test) run.status = "FAIL" @@ -338,6 +387,8 @@ def finish(self, profile): self.add_run(run) self.fail_list.append(run) self.fail_list.sort(key=lambda r: r.test.name) + self.xfail_unexpectedly_passed_list.sort(key=lambda r: r.test.name) + self.may_fail_failed_list.sort(key=lambda r: r.test.name) def write_junit(self, out, suite_name): """ Print results to the Junit XML file for reading by Jenkins.""" diff --git a/scripts/testing/city_runner/ui.py b/scripts/testing/city_runner/ui.py index fceb97f6b..7e708fc82 100644 --- a/scripts/testing/city_runner/ui.py +++ b/scripts/testing/city_runner/ui.py @@ -99,6 +99,9 @@ def print_test_output(self, run, results): def print_results(self, results): pass_rate = self.calc_progress(results.num_passes, results.num_tests, 1) + xfail_exp_fail_rate = self.calc_progress(results.num_xfail_expected_fails, results.num_tests, 1) + xfail_unexp_pass_rate = self.calc_progress(results.num_xfail_unexpectedly_passed, results.num_tests, 1) + mayfail_fail_rate = self.calc_progress(results.num_mayfail_fails, results.num_tests, 1) fail_rate = self.calc_progress(results.num_fails, results.num_tests, 1) timeout_rate = self.calc_progress(results.num_timeouts, results.num_tests, 1) skip_rate = self.calc_progress(results.num_skipped, results.num_tests, 1) @@ -112,6 +115,18 @@ def print_results(self, results): self.out.write(" %s\n" % run.test.name) self.out.write("\n") + if results.xfail_unexpectedly_passed_list: + self.out.write(self.fmt.red("Unexpected passing XFail tests:\n")) + for run in results.xfail_unexpectedly_passed_list: + self.out.write(" %s\n" % run.test.name) + self.out.write("\n") + + if results.may_fail_failed_list: + self.out.write(self.fmt.red("May Fail failing tests:\n")) + for run in results.may_fail_failed_list: + self.out.write(" %s\n" % run.test.name) + self.out.write("\n") + if results.timeout_list: self.out.write(self.fmt.blue("Timeout tests:\n")) for run in results.timeout_list: @@ -133,17 +148,25 @@ def print_results(self, results): self.out.write(self.fmt.white("Finished in ")) self.out.write("%s\n" % duration) # Print 
diff --git a/scripts/testing/city_runner/ui.py b/scripts/testing/city_runner/ui.py
index fceb97f6b..7e708fc82 100644
--- a/scripts/testing/city_runner/ui.py
+++ b/scripts/testing/city_runner/ui.py
@@ -99,6 +99,9 @@ def print_test_output(self, run, results):

     def print_results(self, results):
         pass_rate = self.calc_progress(results.num_passes, results.num_tests, 1)
+        xfail_exp_fail_rate = self.calc_progress(results.num_xfail_expected_fails, results.num_tests, 1)
+        xfail_unexp_pass_rate = self.calc_progress(results.num_xfail_unexpectedly_passed, results.num_tests, 1)
+        mayfail_fail_rate = self.calc_progress(results.num_mayfail_fails, results.num_tests, 1)
         fail_rate = self.calc_progress(results.num_fails, results.num_tests, 1)
         timeout_rate = self.calc_progress(results.num_timeouts, results.num_tests, 1)
         skip_rate = self.calc_progress(results.num_skipped, results.num_tests, 1)
@@ -112,6 +115,18 @@ def print_results(self, results):
             self.out.write(" %s\n" % run.test.name)
             self.out.write("\n")

+        if results.xfail_unexpectedly_passed_list:
+            self.out.write(self.fmt.red("Unexpectedly passing Xfail tests:\n"))
+            for run in results.xfail_unexpectedly_passed_list:
+                self.out.write(" %s\n" % run.test.name)
+            self.out.write("\n")
+
+        if results.may_fail_failed_list:
+            self.out.write(self.fmt.red("Failing Mayfail tests:\n"))
+            for run in results.may_fail_failed_list:
+                self.out.write(" %s\n" % run.test.name)
+            self.out.write("\n")
+
         if results.timeout_list:
             self.out.write(self.fmt.blue("Timeout tests:\n"))
             for run in results.timeout_list:
@@ -133,17 +148,25 @@ def print_results(self, results):
         self.out.write(self.fmt.white("Finished in "))
         self.out.write("%s\n" % duration)
         # Print test figures.
-        self.out.write(self.fmt.green("\nPassed:             "))
+        self.out.write(self.fmt.green("\nPassed expectedly:  "))
         self.out.write("%6d (%5.1f %%)\n" % (results.num_passes, pass_rate))
-        self.out.write(self.fmt.red("Failed:             "))
+        self.out.write(self.fmt.red("Failed unexpectedly:"))
         self.out.write("%6d (%5.1f %%)\n" % (results.num_fails, fail_rate))
+        if results.num_xfail_unexpectedly_passed > 0:
+            self.out.write(self.fmt.red("Passing unexpectedly:"))
+            self.out.write("%5d (%5.1f %%)\n" % (results.num_xfail_unexpectedly_passed, xfail_unexp_pass_rate))
+        if results.num_xfail_expected_fails > 0:
+            self.out.write("Failed expectedly:   %5d (%5.1f %%)\n" % (results.num_xfail_expected_fails, xfail_exp_fail_rate))
+        if results.num_mayfail_fails > 0:
+            self.out.write(self.fmt.red("Failing may fail:     "))
+            self.out.write("%4d (%5.1f %%)\n" % (results.num_mayfail_fails, mayfail_fail_rate))
-        self.out.write(self.fmt.blue("Timeouts: "))
+        self.out.write(self.fmt.blue("Timeouts:           "))
         self.out.write("%6d (%5.1f %%)\n" % (results.num_timeouts, timeout_rate))
-        self.out.write(self.fmt.yellow("Skipped: "))
+        self.out.write(self.fmt.yellow("Skipped:            "))
         self.out.write("%6d (%5.1f %%)\n" % (results.num_skipped, skip_rate))
-        self.out.write(self.fmt.white("Overall Pass: "))
+        self.out.write(self.fmt.white("Overall Pass:       "))
         self.out.write("%6d (%5.1f %%)\n" % (results.num_passes_cts, cts_rate))
-        self.out.write(self.fmt.white("Overall Fail: "))
+        self.out.write(self.fmt.white("Overall Fail:       "))
         self.out.write("%6d (%5.1f %%)\n" % (results.num_total_cts - results.num_passes_cts, cts_fail_rate))