diff --git a/Containerfile b/Containerfile
index 925c933..597bb62 100644
--- a/Containerfile
+++ b/Containerfile
@@ -11,8 +11,6 @@ ENV VSE_DIR=/usr/vse
RUN mkdir -p ${VSE_DIR}
WORKDIR ${VSE_DIR}
-RUN git clone -v --depth=1 https://github.com/redhat-partner-solutions/testdrive.git
-
RUN git clone -v --depth=1 https://github.com/redhat-partner-solutions/vse-sync-test-report.git
RUN git clone -v --depth=1 https://github.com/redhat-partner-solutions/vse-sync-test.git
diff --git a/cmd/e2e.sh b/cmd/e2e.sh
index 6ec250f..1251d3d 100755
--- a/cmd/e2e.sh
+++ b/cmd/e2e.sh
@@ -14,7 +14,7 @@ COLLECTORPATH=$TESTROOT/vse-sync-collection-tools
ANALYSERPATH=$TESTROOT/vse-sync-test
REPORTGENPATH=$TESTROOT/vse-sync-test-report
REPORTPRIVSUTGENPATH=$TESTROOT/vse-sync-sut
-TDPATH=$TESTROOT/testdrive/src
+TDPATH=$ANALYSERPATH/testdrive/src
PPPATH=$ANALYSERPATH/postprocess/src
OUTPUTDIR=$TESTROOT/data
@@ -143,7 +143,6 @@ audit_container() {
{
"vse-sync-collection-tools": $(audit_repo $COLLECTORPATH),
"vse-sync-test": $(audit_repo $ANALYSERPATH),
- "testdrive": $(audit_repo $TDPATH),
"vse-sync-test-report": $(audit_repo $REPORTGENPATH)
}
EOF
@@ -254,13 +253,13 @@ create_pdf() {
}
EOF
- env PYTHONPATH=$TDPATH make CONFIG=$config JUNIT=$FULLJUNIT OBJ=$REPORTARTEFACTDIR BUILDER=native GIT_HASH=$(echo "$SYNCTESTCOMMIT" | head -c 8) clean
+ make CONFIG=$config JUNIT=$FULLJUNIT OBJ=$REPORTARTEFACTDIR BUILDER=native GIT_HASH=$(echo "$SYNCTESTCOMMIT" | head -c 8) clean
if [ -d "$REPORTPRIVSUTGENPATH" ];
then
- env PYTHONPATH=$TDPATH make CONFIG=$config ATTRIBUTES="allow-uri-read" JUNIT=$FULLJUNIT OBJ=$REPORTARTEFACTDIR BUILDER=native GIT_HASH=$(echo "$SYNCTESTCOMMIT" | head -c 8) ADOC=$REPORTPRIVSUTGENPATH/doc/setup.adoc PNG=$REPORTPRIVSUTGENPATH/doc/testreport.png all
+ make CONFIG=$config ATTRIBUTES="allow-uri-read" JUNIT=$FULLJUNIT OBJ=$REPORTARTEFACTDIR BUILDER=native GIT_HASH=$(echo "$SYNCTESTCOMMIT" | head -c 8) ADOC=$REPORTPRIVSUTGENPATH/doc/setup.adoc PNG=$REPORTPRIVSUTGENPATH/doc/testreport.png all
else
- env PYTHONPATH=$TDPATH make CONFIG=$config ATTRIBUTES="allow-uri-read" JUNIT=$FULLJUNIT OBJ=$REPORTARTEFACTDIR BUILDER=native GIT_HASH=$(echo "$SYNCTESTCOMMIT" | head -c 8) all
+ make CONFIG=$config ATTRIBUTES="allow-uri-read" JUNIT=$FULLJUNIT OBJ=$REPORTARTEFACTDIR BUILDER=native GIT_HASH=$(echo "$SYNCTESTCOMMIT" | head -c 8) all
fi
mv $REPORTARTEFACTDIR/test-report.pdf $FINALREPORTPATH
diff --git a/testdrive/README.md b/testdrive/README.md
new file mode 100644
index 0000000..6014dc6
--- /dev/null
+++ b/testdrive/README.md
@@ -0,0 +1,332 @@
+# testdrive
+
+`testdrive` is a library for:
+
+ * Building a URI for a test case from a base URI and the test case path
+ * Generating JUnit test results for supplying to [Red Hat DCI][1] or other CI
+ * Generating [Asciidoc][3] test results for human consumption
+
+The implementation of [testdrive.run.main()][2] provides an illustration of how
+this library can be used.
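+
+For example, the test ids seen in the example output below are built with
+`testdrive.uri.UriBuilder`; a minimal sketch, assuming `src` is on
+`PYTHONPATH`:
+
+    >>> from testdrive.uri import UriBuilder
+    >>> builder = UriBuilder("https://github.com/redhat-partner-solutions/testdrive/")
+    >>> builder.build("A")
+    'https://github.com/redhat-partner-solutions/testdrive/A/'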
+
+## testdrive.run
+
+Module `testdrive.run` is a convenience tool for running a set of tests
+specified as lines of JSON in a file. Using example files in this repo:
+
+ $ env PYTHONPATH=src python3 -m testdrive.run https://github.com/redhat-partner-solutions/testdrive/ examples/sequence/tests.json
+ {"result": false, "reason": "something went wrong", "data": {"foo": "bar"}, "argv": [], "id": "https://github.com/redhat-partner-solutions/testdrive/A/", "timestamp": "2023-08-25T07:22:57.368206+00:00", "time": 0.056209}
+ {"result": true, "reason": null, "data": {"baz": 99}, "argv": [], "id": "https://github.com/redhat-partner-solutions/testdrive/B/", "timestamp": "2023-08-25T07:22:57.424912+00:00", "time": 0.058858}
+ {"result": false, "reason": "no particular reason", "argv": [], "id": "https://github.com/redhat-partner-solutions/testdrive/C/", "timestamp": "2023-08-25T07:22:57.483833+00:00", "time": 0.005414}
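+
+Each line of `tests.json` is a JSON array: the first element is the path of
+the test implementation (relative to the directory of the file, or to
+`--basedir`), and any remaining elements are passed to the implementation as
+arguments. For the example above the entries are along these lines (a sketch,
+not necessarily a verbatim copy of the file):
+
+    ["A/testimpl.py"]
+    ["B/testimpl.py"]
+    ["C/test.sh"]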
+
+Alternatively, tests can be supplied on stdin. In this case option `--basedir`
+must be supplied. Using example files in this repo:
+
+ $ cat examples/sequence/tests.json | env PYTHONPATH=src python3 -m testdrive.run --basedir=examples/sequence/ https://github.com/redhat-partner-solutions/testdrive/ -
+ {"result": false, "reason": "something went wrong", "data": {"foo": "bar"}, "argv": [], "id": "https://github.com/redhat-partner-solutions/testdrive/A/", "timestamp": "2023-08-25T07:25:49.818848+00:00", "time": 0.029972}
+ {"result": true, "reason": null, "data": {"baz": 99}, "argv": [], "id": "https://github.com/redhat-partner-solutions/testdrive/B/", "timestamp": "2023-08-25T07:25:49.848893+00:00", "time": 0.028337}
+ {"result": false, "reason": "no particular reason", "argv": [], "id": "https://github.com/redhat-partner-solutions/testdrive/C/", "timestamp": "2023-08-25T07:25:49.877293+00:00", "time": 0.003946}
+
+`testdrive.run` can also be instructed to call a script colocated with a test
+implementation to plot images of input data and results. Option `--imagedir`
+must be supplied to generate images. Option `--plotter` gives the name of the
+script to call: if no script with this name is colocated with the test
+implementation, plotting is skipped for that test. Using example files in this
+repo:
+
+ $ find examples/ -name 'plot*'
+ examples/sequence/B/plot.sh
+ examples/sequence/C/plot.sh
+
+(These scripts only print examples of the expected JSON; they do not create images.)
+
+    $ cat examples/sequence/tests.json | env PYTHONPATH=src python3 -m testdrive.run --basedir=examples/sequence/ --imagedir=. --plotter=plot.sh https://github.com/redhat-partner-solutions/testdrive/ -
+ {"result": false, "reason": "something went wrong", "data": {"foo": "bar"}, "argv": [], "id": "https://github.com/redhat-partner-solutions/testdrive/A/", "timestamp": "2023-09-04T15:31:30.336801+00:00", "time": 0.029548}
+ {"result": true, "reason": null, "data": {"baz": 99}, "argv": [], "id": "https://github.com/redhat-partner-solutions/testdrive/B/", "timestamp": "2023-09-04T15:31:30.366548+00:00", "time": 0.090166, "plot": [{"path": "./B_testimpl.png", "title": "foo bar baz"}]}
+ {"result": false, "reason": "no particular reason", "argv": [], "id": "https://github.com/redhat-partner-solutions/testdrive/C/", "timestamp": "2023-09-04T15:31:30.460420+00:00", "time": 0.003882, "plot": [{"path": "./C_test.png"}, "./C_test_lhs.pdf", {"path": "./C_test_rhs.pdf", "title": "rhs"}]}
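+
+A plotter is called with the image path prefix as its first argument, followed
+by the same argv as the test implementation; it must print to stdout a JSON
+array describing the images it created (paths, optionally with titles) and
+must not write to stderr or exit non-zero. A hypothetical `plot.py` (the
+default `--plotter` name) might look like this:
+
+    #!/usr/bin/env python3
+    # hypothetical plotter sketch: create the image(s), then describe them as JSON
+    import json
+    import sys
+
+    prefix = sys.argv[1]      # image path prefix supplied by testdrive.run
+    test_args = sys.argv[2:]  # the argv passed to the test implementation
+    # ... plot the test's input data / results to prefix + ".png" here ...
+    print(json.dumps([{"path": prefix + ".png", "title": "input data"}]))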
+
+## testdrive.junit
+
+Module `testdrive.junit` can be used to generate JUnit test results from lines
+of JSON (one line per test case result):
+
+ $ python3 -m testdrive.run https://github.com/redhat-partner-solutions/testdrive/ examples/sequence/tests.json | \
+ python3 -m testdrive.junit --prettify "examples.sequence" -
+
+The output is an XML document: a `testsuites` root wrapping a single
+`testsuite` named "examples.sequence", with one `testcase` element per test.
+Each failed case carries a `failure` element giving the reason, and every case
+carries a `system-out` element holding its pretty-printed JSON result and a
+`properties` element recording the test id.
+
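+The same output can be produced programmatically; a minimal sketch, assuming
+`src` is on `PYTHONPATH` (the case dicts and their ids here are hypothetical,
+abridged inputs rather than captured results):
+
+    from testdrive.junit.create import junit
+
+    cases = [
+        {"id": "https://example.net/tests/A/", "result": False, "reason": "something went wrong"},
+        {"id": "https://example.net/tests/B/", "result": True, "reason": None},
+    ]
+    # junit() returns the XML as a string; prettify=True indents it
+    print(junit("examples.sequence", cases, prettify=True))
+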
+## testdrive.xml
+
+Module `testdrive.xml` provides a basic XML validator. This, along with the
+schema file `junit/schema/testdrive.xsd`, is provided in order to test the
+output from `testdrive.junit` and to allow comparison with the Windy Road
+JUnit schema, `junit/schema/junit.xsd`.
+
+The following JUnit output from testdrive...
+
+ $ cat results.xml
+    (the same JUnit XML as generated by testdrive.junit above)
+
+...validates using the testdrive JUnit schema...
+
+ $ python3 -m testdrive.xml junit/schema/testdrive.xsd results.xml
+    (the validated XML is printed back to stdout)
+
+...and _does not_ validate using the Windy Road JUnit schema:
+
+ $ python3 -m testdrive.xml --verbose junit/schema/junit.xsd results.xml
+
+    failed validating {'tests': '3', 'errors': '0', 'failures': '2', 'skipped': '0'} with XsdAttributeGroup():
+
+    Reason: 'tests' attribute not allowed for element
+
+    Schema:
+
+        (the testsuites element declaration from junit.xsd, which does not
+        declare these attributes on the root element)
+
+    Instance:
+
+        (the testsuites element from results.xml, with its attributes and the
+        start of its testcase content)
+
+    Path: /testsuites
+
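+Validation can also be invoked from Python; a minimal sketch (paths assume the
+repository layout above):
+
+    from testdrive.xml import validate
+
+    # validate() returns True on success, or a string reason on failure
+    outcome = validate("junit/schema/testdrive.xsd", "results.xml")
+    if outcome is not True:
+        print(outcome)
+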
+To see the differences between the schemas, simply use diff:
+
+ $ diff junit/schema/junit.xsd junit/schema/testdrive.xsd
+ 7c7,16
+ < JUnit test result schema for the Apache Ant JUnit and JUnitReport tasks
+ ---
+ >
+ > A schema for testdrive JUnit test results.
+ >
+ > testdrive emits test results which are not strictly compatible with the Windy
+ > Road JUnit schema (because the CI systems which consume these results are not
+ > strictly compatible). This schema is a modified version of the Windy Road JUnit
+ > schema, for which the original text in this element is retained below.
+ >
+ > -----
+ > JUnit test result schema for the Apache Ant JUnit and JUnitReport tasks
+ 11a21,36
+ ...
+
+## testdrive.asciidoc
+
+Module `testdrive.asciidoc` can be used to generate [Asciidoc][3] test results
+from lines of JSON (one line per test case result):
+
+ $ python3 -m testdrive.run https://github.com/redhat-partner-solutions/testdrive/ examples/sequence/tests.json | \
+ python3 -m testdrive.asciidoc "examples.sequence" - | tee results.adoc
+ === Test Suite: examples.sequence
+
+ ==== Summary
+
+ [cols=2*.^a]
+ |===
+
+
+ |
+ *hostname*
+ |
+ _not known_
+
+ |
+ *started*
+ |
+ 2023-07-31T13:29:08.844977+00:00
+ ...
+
+To include this in a simple report:
+
+ $ cat report.adoc
+ = Test Report
+
+ :toc:
+
+ == Test Results
+
+ <<<
+ include::results.adoc[]
+
+ $ asciidoctor -a toc report.adoc && firefox report.html
+
+[1]: https://www.distributed-ci.io/
+[2]: https://github.com/redhat-partner-solutions/testdrive/blob/cce8fb30bd8eed8e83f53665cd1433e20c81cfd3/src/testdrive/run.py#L60
+[3]: https://docs.asciidoctor.org/asciidoc/latest/
diff --git a/testdrive/examples/sequence/A/testimpl.py b/testdrive/examples/sequence/A/testimpl.py
new file mode 100755
index 0000000..57872fa
--- /dev/null
+++ b/testdrive/examples/sequence/A/testimpl.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python3
+
+import json
+
+print(
+ json.dumps(
+ {
+ "result": False,
+ "reason": "something went wrong",
+ "data": {
+ "foo": "bar",
+ },
+ }
+ )
+)
diff --git a/testdrive/examples/sequence/B/plot.sh b/testdrive/examples/sequence/B/plot.sh
new file mode 100755
index 0000000..2cf29a2
--- /dev/null
+++ b/testdrive/examples/sequence/B/plot.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+printf '[{"path": "%s", "title": "foo bar baz"}]' "${1}.png"
diff --git a/testdrive/examples/sequence/B/testimpl.py b/testdrive/examples/sequence/B/testimpl.py
new file mode 100755
index 0000000..b0a479d
--- /dev/null
+++ b/testdrive/examples/sequence/B/testimpl.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python3
+
+import json
+
+print(
+ json.dumps(
+ {
+ "result": True,
+ "reason": None,
+ "data": {
+ "baz": 99,
+ },
+ }
+ )
+)
diff --git a/testdrive/examples/sequence/C/plot.sh b/testdrive/examples/sequence/C/plot.sh
new file mode 100755
index 0000000..1b6223f
--- /dev/null
+++ b/testdrive/examples/sequence/C/plot.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+printf '[{"path": "%s"}, "%s", {"path": "%s", "title": "rhs"}]' "${1}.png" "${1}_lhs.pdf" "${1}_rhs.pdf"
diff --git a/testdrive/examples/sequence/C/test.sh b/testdrive/examples/sequence/C/test.sh
new file mode 100755
index 0000000..4fc7b85
--- /dev/null
+++ b/testdrive/examples/sequence/C/test.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+echo foo >&2
+echo bar
+echo baz >&2
+echo quux
+exit 7
+echo corge >&2
diff --git a/testdrive/junit/examples/minimal.xml b/testdrive/junit/examples/minimal.xml
new file mode 100644
index 0000000..3f3c832
--- /dev/null
+++ b/testdrive/junit/examples/minimal.xml
@@ -0,0 +1,2 @@
+
+
diff --git a/testdrive/junit/schema/junit-10.xsd b/testdrive/junit/schema/junit-10.xsd
new file mode 100644
index 0000000..286fbf7
--- /dev/null
+++ b/testdrive/junit/schema/junit-10.xsd
@@ -0,0 +1,147 @@
(147 lines of XSD not shown: the JUnit schema from the Jenkins xunit-plugin; see junit/schema/schemas.md)
diff --git a/testdrive/junit/schema/junit-4.xsd b/testdrive/junit/schema/junit-4.xsd
new file mode 100644
index 0000000..488b92b
--- /dev/null
+++ b/testdrive/junit/schema/junit-4.xsd
@@ -0,0 +1,92 @@
(92 lines of XSD not shown: the JUnit schema from the Jenkins DTKit JUnit model; see junit/schema/schemas.md)
\ No newline at end of file
diff --git a/testdrive/junit/schema/junit.xsd b/testdrive/junit/schema/junit.xsd
new file mode 100644
index 0000000..7b81dff
--- /dev/null
+++ b/testdrive/junit/schema/junit.xsd
@@ -0,0 +1,227 @@
+
+
+
+ JUnit test result schema for the Apache Ant JUnit and JUnitReport tasks
+Copyright © 2011, Windy Road Technology Pty. Limited
+The Apache Ant JUnit XML Schema is distributed under the terms of the Apache License Version 2.0 http://www.apache.org/licenses/
+Permission to waive conditions of this license may be requested from Windy Road Support (http://windyroad.org/support).
+
+
+
+
+
+
+
+
+
+ Contains an aggregation of testsuite results
+
+
+
+
+
+
+
+
+
+ Derived from testsuite/@name in the non-aggregated documents
+
+
+
+
+ Starts at '0' for the first testsuite and is incremented by 1 for each following testsuite
+
+
+
+
+
+
+
+
+
+
+
+ Contains the results of executing a testsuite
+
+
+
+
+ Properties (e.g., environment settings) set during test execution
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Indicates that the test was skipped.
+
+
+
+
+
+
+ The message specifying why the test case was skipped
+
+
+
+
+
+
+
+
+ Indicates that the test errored. An errored test is one that had an unanticipated problem. e.g., an unchecked throwable; or a problem with the implementation of the test. Contains as a text node relevant data for the error, e.g., a stack trace
+
+
+
+
+
+
+ The error message. e.g., if a java exception is thrown, the return value of getMessage()
+
+
+
+
+ The type of error that occured. e.g., if a java execption is thrown the full class name of the exception.
+
+
+
+
+
+
+
+
+ Indicates that the test failed. A failure is a test which the code has explicitly failed by using the mechanisms for that purpose. e.g., via an assertEquals. Contains as a text node relevant data for the failure, e.g., a stack trace
+
+
+
+
+
+
+ The message specified in the assert
+
+
+
+
+ The type of the assert.
+
+
+
+
+
+
+
+
+
+ Name of the test method
+
+
+
+
+ Full class name for the class the test method is in.
+
+
+
+
+ Time taken (in seconds) to execute the test
+
+
+
+
+
+
+ Data that was written to standard out while the test was executed
+
+
+
+
+
+
+
+
+
+ Data that was written to standard error while the test was executed
+
+
+
+
+
+
+
+
+
+
+ Full class name of the test for non-aggregated testsuite documents. Class name without the package for aggregated testsuites documents
+
+
+
+
+
+
+
+
+
+ when the test was executed. Timezone may not be specified.
+
+
+
+
+ Host on which the tests were executed. 'localhost' should be used if the hostname cannot be determined.
+
+
+
+
+
+
+
+
+
+ The total number of tests in the suite
+
+
+
+
+ The total number of tests in the suite that failed. A failure is a test which the code has explicitly failed by using the mechanisms for that purpose. e.g., via an assertEquals
+
+
+
+
+ The total number of tests in the suite that errored. An errored test is one that had an unanticipated problem. e.g., an unchecked throwable; or a problem with the implementation of the test.
+
+
+
+
+ The total number of ignored or skipped tests in the suite.
+
+
+
+
+ Time taken (in seconds) to execute the tests in the suite
+
+
+
+
+
+
+
+
+
diff --git a/testdrive/junit/schema/schemas.md b/testdrive/junit/schema/schemas.md
new file mode 100644
index 0000000..488dc48
--- /dev/null
+++ b/testdrive/junit/schema/schemas.md
@@ -0,0 +1,7 @@
+# junit schemas
+
+Original sources:
+
+ * [junit/schema/junit-10.xsd](https://github.com/jenkinsci/xunit-plugin/blob/xunit-2.3.2/src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd)
+ * [junit/schema/junit-4.xsd](https://svn.jenkins-ci.org/trunk/hudson/dtkit/dtkit-format/dtkit-junit-model/src/main/resources/com/thalesgroup/dtkit/junit/model/xsd/junit-4.xsd)
+ * [junit/schema/junit.xsd](https://github.com/windyroad/JUnit-Schema/blob/master/JUnit.xsd)
diff --git a/testdrive/junit/schema/testdrive.xsd b/testdrive/junit/schema/testdrive.xsd
new file mode 100644
index 0000000..04489d6
--- /dev/null
+++ b/testdrive/junit/schema/testdrive.xsd
@@ -0,0 +1,233 @@
+
+
+
+
+ A schema for testdrive JUnit test results.
+
+testdrive emits test results which are not strictly compatible with the Windy
+Road JUnit schema (because the CI systems which consume these results are not
+strictly compatible). This schema is a modified version of the Windy Road JUnit
+schema, for which the original text in this element is retained below.
+
+-----
+JUnit test result schema for the Apache Ant JUnit and JUnitReport tasks
+Copyright © 2011, Windy Road Technology Pty. Limited
+The Apache Ant JUnit XML Schema is distributed under the terms of the Apache License Version 2.0 http://www.apache.org/licenses/
+Permission to waive conditions of this license may be requested from Windy Road Support (http://windyroad.org/support).
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Contains an aggregation of testsuite results
+
+
+
+
+
+
+
+
+
+
+ The total number of tests in the suite
+
+
+
+
+ The total number of tests in the suite that failed. A failure is a test which the code has explicitly failed by using the mechanisms for that purpose. e.g., via an assertEquals
+
+
+
+
+ The total number of tests in the suite that errored. An errored test is one that had an unanticipated problem. e.g., an unchecked throwable; or a problem with the implementation of the test.
+
+
+
+
+ The total number of ignored or skipped tests in the suite.
+
+
+
+
+
+
+ Contains the results of executing a testsuite
+
+
+
+
+
+
+
+
+ Indicates that the test was skipped.
+
+
+
+
+
+
+ The message specifying why the test case was skipped
+
+
+
+
+
+
+
+
+ Indicates that the test errored. An errored test is one that had an unanticipated problem. e.g., an unchecked throwable; or a problem with the implementation of the test. Contains as a text node relevant data for the error, e.g., a stack trace
+
+
+
+
+
+
+ The error message. e.g., if a java exception is thrown, the return value of getMessage()
+
+
+
+
+ The type of error that occured. e.g., if a java execption is thrown the full class name of the exception.
+
+
+
+
+
+
+
+
+ Indicates that the test failed. A failure is a test which the code has explicitly failed by using the mechanisms for that purpose. e.g., via an assertEquals. Contains as a text node relevant data for the failure, e.g., a stack trace
+
+
+
+
+
+
+ The message specified in the assert
+
+
+
+
+ The type of the assert.
+
+
+
+
+
+
+
+
+
+ Name of the test method
+
+
+
+
+ Full class name for the class the test method is in.
+
+
+
+
+ Time taken (in seconds) to execute the test
+
+
+
+
+
+
+
+
+ Data that was written to standard out while the test was executed
+
+
+
+
+
+
+
+
+
+
+
+ Full class name of the test for non-aggregated testsuite documents. Class name without the package for aggregated testsuites documents
+
+
+
+
+
+
+
+
+
+
+ when the test was executed. Timezone may not be specified.
+
+
+
+
+ Host on which the tests were executed. 'localhost' should be used if the hostname cannot be determined.
+
+
+
+
+
+
+
+
+
+ The total number of tests in the suite
+
+
+
+
+ The total number of tests in the suite that failed. A failure is a test which the code has explicitly failed by using the mechanisms for that purpose. e.g., via an assertEquals
+
+
+
+
+ The total number of tests in the suite that errored. An errored test is one that had an unanticipated problem. e.g., an unchecked throwable; or a problem with the implementation of the test.
+
+
+
+
+ The total number of ignored or skipped tests in the suite.
+
+
+
+
+ Time taken (in seconds) to execute the tests in the suite
+
+
+
+
+
+
+
+
+
diff --git a/testdrive/src/testdrive/__init__.py b/testdrive/src/testdrive/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/testdrive/src/testdrive/cases.py b/testdrive/src/testdrive/cases.py
new file mode 100644
index 0000000..af8e93e
--- /dev/null
+++ b/testdrive/src/testdrive/cases.py
@@ -0,0 +1,57 @@
+### SPDX-License-Identifier: GPL-2.0-or-later
+
+"""Test case analysis"""
+
+from .run import timevalue
+
+
+def timing(cases):
+ """Return (timestamp, duration) for test `cases`.
+
+ If `cases` contain no timing information return (None, None). Otherwise
+ return the ISO 8601 string for the earliest timestamp in `cases` for
+ `timestamp`; the total number of seconds from this timestamp to the end of
+ test execution for `duration`.
+ """
+ # result variables
+ timestamp = duration = None
+ # working variables
+ tv_start = tv_end = duration_last = None
+ for case in cases:
+ if "timestamp" in case:
+ try:
+ tv_case = timevalue(case["timestamp"])
+ except TypeError:
+ # timestamp is not an ISO format string
+ # skip decimal relative timestamps
+ continue
+ if tv_start is None:
+ timestamp = case["timestamp"]
+ tv_start = tv_end = tv_case
+ duration_last = case["duration"]
+ elif tv_case < tv_start:
+ timestamp = case["timestamp"]
+ tv_start = tv_case
+ elif tv_end < tv_case:
+ tv_end = tv_case
+ duration_last = case["duration"]
+ if tv_start is not None:
+ duration = (tv_end - tv_start).total_seconds() + duration_last
+ duration = round(duration, 6)
+ return (timestamp, duration)
+
+
+def summarize(cases):
+ """Return a dict of summary statistics counters for test `cases`."""
+ total = len(cases)
+ errors = sum(1 for case in cases if case["result"] == "error")
+ failures = sum(1 for case in cases if case["result"] is False)
+ (timestamp, duration) = timing(cases)
+ return {
+ "total": total,
+ "success": total - (errors + failures),
+ "failure": failures,
+ "error": errors,
+ "timestamp": timestamp,
+ "duration": duration,
+ }
diff --git a/testdrive/src/testdrive/common.py b/testdrive/src/testdrive/common.py
new file mode 100644
index 0000000..0ba6b57
--- /dev/null
+++ b/testdrive/src/testdrive/common.py
@@ -0,0 +1,37 @@
+### SPDX-License-Identifier: GPL-2.0-or-later
+
+"""Common code for command line tools"""
+
+import sys
+from contextlib import nullcontext
+
+
+def open_input(filename, encoding="utf-8", **kwargs):
+ """Return a context manager for reading from `filename`.
+
+ If `filename` is '-' then read from stdin instead of `filename`.
+ """
+ if filename == "-":
+ return nullcontext(sys.stdin)
+ return open(filename, encoding=encoding, **kwargs)
+
+
+def print_line(line, flush=True):
+ """Print `line` and, optionally, `flush` stdout.
+
+ If SIGPIPE is received then set `sys.stdout` to None and return False:
+ otherwise return True.
+
+ The Python recommendation suggests this clean up on SIGPIPE:
+ https://docs.python.org/3/library/signal.html#note-on-sigpipe
+
+ However this code uses setting `sys.stdout` to None as per:
+ https://stackoverflow.com/questions/26692284/\
+ how-to-prevent-brokenpipeerror-when-doing-a-flush-in-python
+ """
+ try:
+ print(line, flush=flush)
+ return True
+ except BrokenPipeError:
+ sys.stdout = None
+ return False
diff --git a/testdrive/src/testdrive/junit/__init__.py b/testdrive/src/testdrive/junit/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/testdrive/src/testdrive/junit/create.py b/testdrive/src/testdrive/junit/create.py
new file mode 100644
index 0000000..6199c49
--- /dev/null
+++ b/testdrive/src/testdrive/junit/create.py
@@ -0,0 +1,295 @@
+### SPDX-License-Identifier: GPL-2.0-or-later
+
+"""Generate JUnit output"""
+
+from argparse import ArgumentParser
+import json
+
+from xml.etree import ElementTree as ET
+
+from ..cases import summarize
+from ..common import open_input
+from ..uri import UriBuilder
+
+
+def _buildattrs(**kwargs):
+ """Return a dict from `kwargs` suitable for creating an XML element with."""
+ attrs = {}
+ for key, val in kwargs.items():
+ if val is not None:
+ attrs[key] = str(val)
+ return attrs
+
+
+def _testsuites(
+ tests,
+ errors,
+ failures,
+ skipped=0,
+ time=None,
+):
+ """Return XML testsuites element, root element.
+
+ `tests` is the number of tests run;
+ `errors` the number of tests which did not return a result;
+ `failures` the number of tests which returned a failure result;
+ `skipped` the number of tests not run;
+ `time` the elapsed time to run all tests.
+ """
+ attrs = _buildattrs(
+ tests=tests,
+ errors=errors,
+ failures=failures,
+ skipped=skipped,
+ time=time,
+ )
+ return ET.Element("testsuites", attrs)
+
+
+def _testsuite(
+ suite,
+ tests,
+ errors,
+ failures,
+ skipped=0,
+ hostname=None,
+ timestamp=None,
+ time=None,
+): # pylint: disable=too-many-arguments
+ """Return XML testsuite element, a container for test cases.
+
+ `suite` is the name of the test suite;
+ `tests` is the number of tests run;
+ `errors` the number of tests which did not return a result;
+ `failures` the number of tests which returned a failure result;
+ `skipped` the number of tests not run;
+ `hostname` the name of the host which ran the tests;
+ `timestamp` the ISO 8601 datetime when the suite was run;
+ `time` the elapsed time to run all tests.
+ """
+ attrs = _buildattrs(
+ name=suite,
+ tests=tests,
+ errors=errors,
+ failures=failures,
+ skipped=skipped,
+ hostname=hostname,
+ timestamp=timestamp,
+ time=time,
+ )
+ return ET.Element("testsuite", attrs)
+
+
+def _testcase(
+ suite,
+ case,
+ time=None,
+):
+ """Return XML testcase element.
+
+ `suite` is the name of the test suite;
+ `case` is the name of the test case;
+ `time` the elapsed time to run the test.
+ """
+ attrs = _buildattrs(
+ classname=suite,
+ name=case,
+ time=time,
+ )
+ return ET.Element("testcase", attrs)
+
+
+def _error(message):
+ """Return XML error element.
+
+ `message` is the error reason. (Only the first line will be included.)
+ """
+ attrs = _buildattrs(
+ type="Error",
+ message=message.split("\n", 1)[0],
+ )
+ return ET.Element("error", attrs)
+
+
+def _failure(message):
+ """Return XML failure element.
+
+ `message` is the failure reason. (Only the first line will be included.)
+ """
+ attrs = _buildattrs(
+ type="Failure",
+ message=message.split("\n", 1)[0],
+ )
+ return ET.Element("failure", attrs)
+
+
+def _system_out(case, exclude=()):
+ """Return XML system-out element.
+
+ Include `case` as a pretty-printed JSON-encoded object,
+ having omitted pairs for keys in `exclude`.
+ """
+ elem = ET.Element("system-out")
+ elem.text = json.dumps(
+ {k: v for (k, v) in case.items() if k not in exclude},
+ sort_keys=True,
+ indent=4,
+ )
+ return elem
+
+
+def _properties(*args):
+ """Return XML properties element.
+
+ Include a sub-element for each property (name, value) in `args`.
+ """
+ elem = ET.Element("properties")
+ for name, value in args:
+ elem.append(ET.Element("property", name=name, value=str(value)))
+ return elem
+
+
+def junit(
+ suite,
+ cases,
+ hostname=None,
+ exclude=(),
+ baseurl_ids=None,
+ baseurl_specs=None,
+ prettify=False,
+):
+ """Return JUnit output for test `cases` in `suite`.
+
+ `suite` is the string name of the test suite;
+ `cases` is a sequence of dict, with each dict defining test case result and
+ metadata;
+ `hostname` the name of the host which ran the tests;
+ `exclude` is a sequence of keys to omit from the JSON object in system-out;
+ `baseurl_ids` is the base URL for test ids;
+ `baseurl_specs` is the base URL for test specifications;
+ if `prettify` then indent XML output.
+
+ Each case must supply values for keys:
+ id - the test URI
+ result - a boolean test result or "error" (no result produced)
+ reason - string reason describing test failure or error
+
+ Each case may supply values for keys:
+ timestamp - ISO 8601 string of UTC time when the test was started
+ duration - test duration in seconds
+
+ If `timestamp` is supplied then `duration` must also be supplied.
+
+ If both `baseurl_ids` and `baseurl_specs` are supplied then add a property
+ element for 'test_specification' for each case. The element text is a URL
+ formed by substituting `baseurl_specs` for the base (prefix) of the case
+ 'id' (which must be `baseurl_ids`).
+ """
+ uri_builder = None
+ # always ensure base URLs are valid
+ if baseurl_ids:
+ UriBuilder(baseurl_ids)
+ if baseurl_specs:
+ UriBuilder(baseurl_specs)
+ # only use base URLs if both are supplied
+ if baseurl_ids and baseurl_specs:
+ uri_builder = UriBuilder(baseurl_ids)
+ summary = summarize(cases)
+ tests = summary["total"]
+ errors = summary["error"]
+ failures = summary["failure"]
+ timestamp = summary["timestamp"]
+ time_total = summary["duration"]
+ e_root = _testsuites(tests, errors, failures)
+ e_suite = _testsuite(
+ suite,
+ tests,
+ errors,
+ failures,
+ hostname=hostname,
+ timestamp=timestamp,
+ time=time_total,
+ )
+ for case in cases:
+ e_case = _testcase(suite, case["id"], time=case.get("duration"))
+ if case["result"] is False:
+ e_case.append(_failure(case["reason"]))
+ elif case["result"] == "error":
+ e_case.append(_error(case["reason"]))
+ elif case["result"] is not True:
+ raise ValueError(f"""bad result "{case['result']}" for case {case['id']}""")
+ e_case.append(_system_out(case, exclude=exclude))
+ properties = [("test_id", case["id"])]
+ if uri_builder:
+ testspec_url = uri_builder.rebase(case["id"], baseurl_specs)
+ properties.append(("test_specification", testspec_url))
+ e_case.append(_properties(*properties))
+ e_suite.append(e_case)
+ e_root.append(e_suite)
+ if prettify:
+ ET.indent(e_root)
+ return ET.tostring(e_root, encoding="unicode", xml_declaration=True)
+
+
+def main():
+ """Generate JUnit output for test cases.
+
+ Build JUnit output for the test cases in input. Print the JUnit output to
+ stdout. Each line of input must contain a JSON object specifying the test
+ id, result and other metadata for a single test case.
+ """
+ aparser = ArgumentParser(description=main.__doc__)
+ aparser.add_argument(
+ "--hostname",
+ help=" ".join(
+ (
+ "The name of the host which ran the tests.",
+ "(Used in JUnit output when supplied.)",
+ )
+ ),
+ )
+ aparser.add_argument(
+ "--exclude",
+ nargs="*",
+ default=("id",),
+        help="Omit pairs for these keys from the JSON object in system-out.",
+ )
+ aparser.add_argument(
+ "--prettify",
+ action="store_true",
+ help="pretty print XML output",
+ )
+ aparser.add_argument(
+ "--baseurl-ids",
+ help="The base URL which test ids are relative to.",
+ )
+ aparser.add_argument(
+ "--baseurl-specs",
+ help="The base URL which test specifications are relative to.",
+ )
+ aparser.add_argument(
+ "suite",
+ help="The name of the test suite. (Used in JUnit output.)",
+ )
+ aparser.add_argument(
+ "input",
+ help="input file, or '-' to read from stdin",
+ )
+ args = aparser.parse_args()
+ with open_input(args.input) as fid:
+ cases = tuple(json.loads(line) for line in fid)
+ print(
+ junit(
+ args.suite,
+ cases,
+ args.hostname,
+ args.exclude,
+ args.baseurl_ids,
+ args.baseurl_specs,
+ args.prettify,
+ )
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/testdrive/src/testdrive/junit/merge.py b/testdrive/src/testdrive/junit/merge.py
new file mode 100644
index 0000000..86ab778
--- /dev/null
+++ b/testdrive/src/testdrive/junit/merge.py
@@ -0,0 +1,76 @@
+### SPDX-License-Identifier: GPL-2.0-or-later
+
+"""Merge JUnit output"""
+
+from argparse import ArgumentParser
+from datetime import timedelta
+
+from xml.etree import ElementTree as ET
+
+from ..run import timevalue
+
+
+def combine(attrs, e_suite):
+ """Combine attribute values from `e_suite` into `attrs`."""
+ for name in ("tests", "errors", "failures", "skipped"):
+ count = int(e_suite.get(name, 0))
+ try:
+ attrs[name] += count
+ except KeyError:
+ attrs[name] = count
+ timestamp = e_suite.get("timestamp")
+ duration = float(e_suite.get("time", 0))
+ if timestamp:
+ try:
+ # time value when [c]ombined testing [b]egan
+ tv_cb = timevalue(attrs["timestamp"])
+ # time value when [c]ombined testing [f]inished
+ tv_cf = tv_cb + timedelta(seconds=attrs["time"])
+ except KeyError:
+ attrs["timestamp"] = timestamp
+ attrs["time"] = duration
+ else:
+ # time value when this [s]uite [b]egan
+ tv_sb = timevalue(timestamp)
+ # time value when this [s]uite [f]inished
+ tv_sf = tv_sb + timedelta(seconds=duration)
+ if tv_sb < tv_cb:
+ attrs["timestamp"] = timestamp
+ if tv_sf < tv_cf:
+ attrs["time"] += (tv_cb - tv_sb).total_seconds()
+ else:
+ attrs["time"] = duration
+ elif tv_cf < tv_sf:
+ attrs["time"] += (tv_sf - tv_cf).total_seconds()
+
+
+def main():
+ """Merge JUnit files and print the output to stdout."""
+ aparser = ArgumentParser(description=main.__doc__)
+ aparser.add_argument(
+ "--prettify",
+ action="store_true",
+ help="pretty print XML output",
+ )
+ aparser.add_argument(
+ "inputs",
+ nargs="+",
+ help="input files",
+ )
+ args = aparser.parse_args()
+ attrs = {}
+ e_suites = []
+ for filename in args.inputs:
+ for e_suite in ET.parse(filename).getroot().iter("testsuite"):
+ e_suites.append(e_suite)
+ combine(attrs, e_suite)
+ e_root = ET.Element("testsuites", {k: str(v) for (k, v) in attrs.items()})
+ for e_suite in e_suites:
+ e_root.append(e_suite)
+ if args.prettify:
+ ET.indent(e_root)
+ print(ET.tostring(e_root, encoding="unicode", xml_declaration=True))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/testdrive/src/testdrive/run.py b/testdrive/src/testdrive/run.py
new file mode 100644
index 0000000..5fb6dab
--- /dev/null
+++ b/testdrive/src/testdrive/run.py
@@ -0,0 +1,167 @@
+### SPDX-License-Identifier: GPL-2.0-or-later
+
+"""Run tests"""
+
+import json
+from argparse import ArgumentParser
+import sys
+import os
+import subprocess
+from datetime import datetime, timezone
+
+from .common import open_input, print_line
+from .source import Source, sequence
+from .uri import UriBuilder
+
+
+def drive(test, *test_args):
+ """Execute `test` and return a result dict.
+
+ If `test` exits with error or outputs to stderr, then the result dict
+ will contain string 'error' at key 'result' and a string at key 'reason'.
+
+ Otherwise the result dict contains whatever `test` outputs to stdout. This
+ output is always expected to be a JSON object with pairs for 'result',
+ 'reason' and other pairs appropriate for `test`.
+
+ The result dict always contains `test_args` at key 'argv'.
+ """
+ subp = subprocess.run(
+ (test,) + test_args,
+ capture_output=True,
+ check=False,
+ env=os.environ,
+ )
+ if not subp.returncode and not subp.stderr:
+ dct = json.loads(subp.stdout)
+ else:
+ reason = f"{test} exited with code {subp.returncode}"
+ if subp.stderr:
+ reason += "\n\n"
+ reason += subp.stderr.decode()
+ dct = {"result": "error", "reason": reason}
+ dct["argv"] = test_args
+ return dct
+
+
+def plot(plotter, prefix, *test_args):
+ """Execute `plotter` and return a sequence of images output.
+
+ If `plotter` exits with error or outputs to stderr, then raise RuntimeError.
+
+ Otherwise the sequence of images output contains whatever `plotter` outputs
+ to stdout. This output is always expected to be a JSON array whose items are
+ either a string, the path to an image filename, or an object with pairs for
+ 'path', the path to an image filename, and optionally a string 'title'. Each
+ image output by `plotter` is expected to use `prefix` as the path and stem
+ for the output filename.
+ """
+ subp = subprocess.run(
+ (plotter, prefix) + test_args,
+ capture_output=True,
+ check=False,
+ env=os.environ,
+ )
+ if not subp.returncode and not subp.stderr:
+ return json.loads(subp.stdout)
+ reason = f"{plotter} exited with code {subp.returncode}:"
+ reason += "\n\n"
+ reason += subp.stderr.decode()
+ raise RuntimeError(reason)
+
+
+def timenow():
+ """Return a datetime value for UTC time now."""
+ return datetime.now(timezone.utc)
+
+
+def timestamp(dtv):
+ """Return an ISO 8601 string for datetime value `dtv`."""
+ return datetime.isoformat(dtv)
+
+
+def timevalue(string):
+ """Return a datetime value for ISO 8601 `string`."""
+ return datetime.fromisoformat(string)
+
+
+def main():
+ """Run tests"""
+ aparser = ArgumentParser(description=main.__doc__)
+ aparser.add_argument(
+ "--basedir",
+ help=" ".join(
+ (
+ "The base directory which tests are relative to.",
+ "If not supplied tests are relative to directory of `input`.",
+ )
+ ),
+ )
+ aparser.add_argument(
+ "--imagedir",
+ help=" ".join(
+ (
+ "The directory which plot image files are to be generated in.",
+ "If not supplied then no plots are generated.",
+ )
+ ),
+ )
+ aparser.add_argument(
+ "--plotter",
+ default="plot.py",
+ help=" ".join(
+ (
+ "Generate a plot of test data by calling a script with this name.",
+ "The script must be colocated with the test implementation.",
+ "The args to the script are the output filename, followed by the",
+ "same argv as passed to the test implementation.",
+ "Ignored if plots are not generated.",
+ )
+ ),
+ )
+ aparser.add_argument(
+ "baseurl",
+ help="The base URL which test ids are relative to.",
+ )
+ aparser.add_argument(
+ "input",
+ help=" ".join(
+ (
+ "File containing tests to run or '-' to read from stdin.",
+ "Each test is specified on a separate line as a JSON array.",
+ "The first element is the name of the test implementation,",
+ "relative to `--basedir`.",
+ "The remaining elements are args to the test implementation.",
+ )
+ ),
+ )
+ args = aparser.parse_args()
+ basedir = args.basedir or os.path.dirname(args.input)
+ builder = UriBuilder(args.baseurl)
+ with open_input(args.input) as fid:
+ source = Source(sequence(json.loads(line) for line in fid))
+ for test, *test_args in source.next():
+ id_ = builder.build(os.path.dirname(test))
+ testimpl = os.path.join(basedir, test)
+ start = timenow()
+ result = drive(testimpl, *test_args)
+ end = timenow()
+ result["id"] = id_
+ if "timestamp" not in result:
+ result["timestamp"] = timestamp(start)
+ result["duration"] = (end - start).total_seconds()
+ if result["result"] in (True, False) and args.imagedir:
+ plotter = os.path.join(os.path.dirname(testimpl), args.plotter)
+ if os.path.isfile(plotter):
+ prefix = os.path.join(
+ args.imagedir,
+ os.path.splitext(test)[0].strip("/").replace("/", "_"),
+ )
+ result["plot"] = plot(plotter, prefix, *test_args)
+ # Python exits with error code 1 on EPIPE
+ if not print_line(json.dumps(result)):
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/testdrive/src/testdrive/source.py b/testdrive/src/testdrive/source.py
new file mode 100644
index 0000000..66755ad
--- /dev/null
+++ b/testdrive/src/testdrive/source.py
@@ -0,0 +1,22 @@
+### SPDX-License-Identifier: GPL-2.0-or-later
+
+"""Test sources"""
+
+
+def sequence(*args):
+ """A generator of a linear sequence of tests from `args`."""
+ yield from args
+
+
+class Source:
+ """A source of tests to run."""
+
+ def __init__(self, generator):
+ self._generator = generator
+
+ def next(self):
+ """Return the next test to run, or None."""
+ try:
+ return next(self._generator)
+ except StopIteration:
+ return None
diff --git a/testdrive/src/testdrive/uri.py b/testdrive/src/testdrive/uri.py
new file mode 100644
index 0000000..f7d4581
--- /dev/null
+++ b/testdrive/src/testdrive/uri.py
@@ -0,0 +1,61 @@
+### SPDX-License-Identifier: GPL-2.0-or-later
+
+"""Test URIs"""
+
+from urllib.parse import (
+ urlsplit,
+ urlunsplit,
+ urlencode,
+ quote_plus,
+)
+
+
+class UriBuilder:
+ """A builder of URIs relative to a `base` absolute URI"""
+
+ def __init__(self, base, **kwargs):
+ (scheme, authority, path, query, fragment) = urlsplit(base)
+ if not scheme or query or fragment or base.endswith("#"):
+ raise ValueError(base)
+ self._is_urn = scheme.lower() == "urn"
+ self._path_sep = ":" if self._is_urn else "/"
+ head = path.split(self._path_sep)
+ if head[-1] == "":
+ head = head[:-1]
+ self._scheme = scheme
+ self._authority = authority
+ self._head = head
+ self._query = urlencode(kwargs, quote_via=quote_plus) if kwargs else None
+
+ def build(self, path):
+ """Build a URI from `path` relative to this instance's base"""
+ tail = path.split("/")
+ if tail[0] == "":
+ tail = tail[1:]
+ if tail[-1] == "":
+ tail = tail[:-1]
+ if not self._is_urn:
+ tail.append("")
+ return urlunsplit(
+ (
+ self._scheme,
+ self._authority,
+ self._path_sep.join(self._head + tail),
+ self._query,
+ None, # never supply a fragment
+ )
+ )
+
+ def rebase(self, uri, base):
+ """Return a URI from `uri` rebased under `base`, discarding query"""
+ (scheme, authority, path, _, fragment) = urlsplit(uri)
+ path = path.split(self._path_sep)
+ if (
+ scheme.lower() != self._scheme.lower()
+ or authority != self._authority
+ or path[: len(self._head)] != self._head
+ or fragment
+ or uri.endswith("#")
+ ):
+ raise ValueError(f"cannot rebase {uri}")
+ return UriBuilder(base).build("/".join(path[len(self._head):]))
diff --git a/testdrive/src/testdrive/xml.py b/testdrive/src/testdrive/xml.py
new file mode 100644
index 0000000..3e50755
--- /dev/null
+++ b/testdrive/src/testdrive/xml.py
@@ -0,0 +1,65 @@
+### SPDX-License-Identifier: GPL-2.0-or-later
+
+"""Validate XML against a XSD schema"""
+
+import sys
+from argparse import ArgumentParser
+
+from xml.etree import ElementTree
+
+import xmlschema
+from xmlschema.validators.exceptions import XMLSchemaValidationError
+
+
+def validate(schema, filename):
+ """Validate XML in `filename` against XSD `schema`.
+
+ Return True on validation success, or a string reason on validation failure.
+ """
+ try:
+ xmlschema.validate(filename, schema)
+ return True
+ except XMLSchemaValidationError as exc:
+ return str(exc)
+
+
+def prettify(filename):
+ """Return prettified XML from `filename`."""
+ et_ = ElementTree.parse(filename)
+ ElementTree.indent(et_)
+ return ElementTree.tostring(et_.getroot(), encoding="unicode")
+
+
+def source(filename, encoding="utf-8"):
+ """Return source text from `filename`."""
+ with open(filename, encoding=encoding) as fid:
+ return fid.read()
+
+
+def main():
+    """Validate XML against an XSD schema, printing valid XML to stdout"""
+ aparser = ArgumentParser(description=main.__doc__)
+ aparser.add_argument(
+ "--verbose",
+ action="store_true",
+ help="print validation failure reason on error",
+ )
+ aparser.add_argument(
+ "--prettify",
+ action="store_true",
+ help="pretty print XML output",
+ )
+ aparser.add_argument("schema", help="XSD schema file")
+ aparser.add_argument("filename", help="XML data file")
+ args = aparser.parse_args()
+ reason = validate(args.schema, args.filename)
+ if reason is not True:
+ sys.exit(reason if args.verbose else 1)
+ elif args.prettify:
+ print(prettify(args.filename))
+ else:
+ print(source(args.filename), end="")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/testdrive/tests/__init__.py b/testdrive/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/testdrive/tests/testdrive/__init__.py b/testdrive/tests/testdrive/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/testdrive/tests/testdrive/test_run.py b/testdrive/tests/testdrive/test_run.py
new file mode 100644
index 0000000..4708892
--- /dev/null
+++ b/testdrive/tests/testdrive/test_run.py
@@ -0,0 +1,46 @@
+### SPDX-License-Identifier: GPL-2.0-or-later
+
+"""Test cases for testdrive.run"""
+
+import os.path
+
+from unittest import TestCase
+
+from testdrive.run import drive
+
+EXAMPLES = os.path.join(
+ os.path.dirname(__file__),
+ "../../examples/",
+)
+
+
+class TestDrive(TestCase):
+ """Tests for testdrive.run.drive"""
+
+ def test_success(self):
+ """Test testdrive.run.drive with test success"""
+ test = os.path.join(EXAMPLES, "sequence/B/testimpl.py")
+ self.assertEqual(
+ drive(test),
+ {"argv": (), "result": True, "reason": None, "data": {"baz": 99}},
+ )
+
+ def test_failure(self):
+ """Test testdrive.run.drive with test failure"""
+ test = os.path.join(EXAMPLES, "sequence/C/test.sh")
+ self.assertEqual(
+ drive(test),
+ {"argv": (), "result": False, "reason": "no particular reason"},
+ )
+
+ def test_error(self):
+ """Test testdrive.run.drive with test error"""
+ test = os.path.join(EXAMPLES, "terror.sh")
+ self.assertEqual(
+ drive(test),
+ {
+ "argv": (),
+ "result": "error",
+ "reason": f"{test} exited with code 7\n\nfoo\nbaz\n",
+ },
+ )
diff --git a/testdrive/tests/testdrive/test_uri.py b/testdrive/tests/testdrive/test_uri.py
new file mode 100644
index 0000000..b7acee3
--- /dev/null
+++ b/testdrive/tests/testdrive/test_uri.py
@@ -0,0 +1,177 @@
+### SPDX-License-Identifier: GPL-2.0-or-later
+
+"""Test cases for testdrive.uri"""
+
+from unittest import TestCase
+
+from testdrive.uri import UriBuilder
+
+
+class TestUrn(TestCase):
+ """Tests for testdrive.uri.UriBuilder building URN"""
+
+ def test_urn_base_errors(self):
+ """Test testdrive.uri.UriBuilder base URN errors"""
+ with self.assertRaises(ValueError):
+ UriBuilder("urn:path?query=not-allowed")
+ with self.assertRaises(ValueError):
+ UriBuilder("urn:path#fragment-not-allowed")
+ with self.assertRaises(ValueError):
+ UriBuilder("urn:path#") # empty fragment not allowed
+
+ def test_urn_path_rel(self):
+ """Test testdrive.uri.UriBuilder builds URN from relative path"""
+ base = "urn:abc:def"
+ path = "foo/bar"
+ qargs = {"v": 1}
+ for b_suffix in ("", ":"):
+ for p_suffix in ("", "/"):
+ self.assertEqual(
+ UriBuilder(base + b_suffix).build(path + p_suffix),
+ "urn:abc:def:foo:bar",
+ )
+ self.assertEqual(
+ UriBuilder(base + b_suffix, **qargs).build(path + p_suffix),
+ "urn:abc:def:foo:bar?v=1",
+ )
+
+ def test_urn_path_abs(self):
+ """Test testdrive.uri.UriBuilder builds URN from absolute path"""
+ base = "urn:ghi"
+ path = "/quux/corge"
+ qargs = {"v": 2}
+ for b_suffix in ("", ":"):
+ for p_suffix in ("", "/"):
+ self.assertEqual(
+ UriBuilder(base + b_suffix).build(path + p_suffix),
+ "urn:ghi:quux:corge",
+ )
+ self.assertEqual(
+ UriBuilder(base + b_suffix, **qargs).build(path + p_suffix),
+ "urn:ghi:quux:corge?v=2",
+ )
+
+ def test_urn_rebase_errors(self):
+ """Test testdrive.uri.UriBuilder rebase URN errors"""
+ base = "urn:jkl"
+ path = "/wibble/wobble/"
+ qargs = {"x": "y"}
+ builder = UriBuilder(base, **qargs)
+ urn = builder.build(path)
+ other = "https://target/base/"
+ self.assertEqual(
+ builder.rebase(urn, other),
+ "https://target/base/wibble/wobble/",
+ )
+ with self.assertRaises(ValueError):
+ builder.rebase("https://xyz/wibble/wobble/", other)
+ with self.assertRaises(ValueError):
+ builder.rebase("urn:xyz:wibble:wobble", other)
+ with self.assertRaises(ValueError):
+ builder.rebase(urn + "#frag", other)
+ with self.assertRaises(ValueError):
+ builder.rebase(urn + "#", other)
+
+ def test_urn_rebase(self):
+ """Test testdrive.uri.UriBuilder rebases URN"""
+ base1 = "urn:jkl"
+ base2 = "URN:mno:pqr:"
+ path = "/wibble/wobble/"
+ qargs = {"x": "y"}
+ builder = UriBuilder(base1, **qargs)
+ urn = builder.build(path)
+ self.assertEqual(
+ urn,
+ "urn:jkl:wibble:wobble?x=y",
+ )
+ self.assertEqual(
+ builder.rebase(urn, base2),
+ "urn:mno:pqr:wibble:wobble",
+ )
+
+
+class TestUrl(TestCase):
+ """Tests for testdrive.uri.UriBuilder building URL"""
+
+ def test_url_base_errors(self):
+ """Test testdrive.uri.UriBuilder base URL errors"""
+ with self.assertRaises(ValueError):
+ UriBuilder("//authority/path/") # missing scheme not allowed
+ with self.assertRaises(ValueError):
+ UriBuilder("scheme://authority/path?query=not-allowed")
+ with self.assertRaises(ValueError):
+ UriBuilder("scheme://authority/path#fragment-not-allowed")
+ with self.assertRaises(ValueError):
+ UriBuilder("scheme://authority/path#") # empty fragment not allowed
+
+ def test_url_path_rel(self):
+ """Test testdrive.uri.UriBuilder builds URL from relative path"""
+ base = "https://abc.org/def"
+ path = "foo/bar"
+ qargs = {"v": 3}
+ for b_suffix in ("", "/"):
+ for p_suffix in ("", "/"):
+ self.assertEqual(
+ UriBuilder(base + b_suffix).build(path + p_suffix),
+ "https://abc.org/def/foo/bar/",
+ )
+ self.assertEqual(
+ UriBuilder(base + b_suffix, **qargs).build(path + p_suffix),
+ "https://abc.org/def/foo/bar/?v=3",
+ )
+
+ def test_url_path_abs(self):
+ """Test testdrive.uri.UriBuilder builds URL from absolute path"""
+ base = "https://ghi.org"
+ path = "/quux/corge"
+ qargs = {"v": "thud"}
+ for b_suffix in ("", "/"):
+ for p_suffix in ("", "/"):
+ self.assertEqual(
+ UriBuilder(base + b_suffix).build(path + p_suffix),
+ "https://ghi.org/quux/corge/",
+ )
+ self.assertEqual(
+ UriBuilder(base + b_suffix, **qargs).build(path + p_suffix),
+ "https://ghi.org/quux/corge/?v=thud",
+ )
+
+ def test_url_rebase_errors(self):
+ """Test testdrive.uri.UriBuilder rebase URL errors"""
+ base = "https://jkl.co.uk/mno/"
+ path = "/wibble/wobble/"
+ qargs = {"x": "Y"}
+ builder = UriBuilder(base, **qargs)
+ url = builder.build(path)
+ other = "https://target/base/"
+ self.assertEqual(
+ builder.rebase(url, other),
+ "https://target/base/wibble/wobble/",
+ )
+ with self.assertRaises(ValueError):
+ builder.rebase("ftp://jkl.co.uk/mno/wibble/wobble/", other)
+ with self.assertRaises(ValueError):
+ builder.rebase("https://mno.co.uk/mno/wibble/wobble/", other)
+ with self.assertRaises(ValueError):
+ builder.rebase("https://jkl.co.uk/jkl/wibble/wobble/", other)
+ with self.assertRaises(ValueError):
+ builder.rebase(url + "#frag", other)
+ with self.assertRaises(ValueError):
+ builder.rebase(url + "#", other)
+
+ def test_url_rebase(self):
+ """Test testdrive.uri.UriBuilder rebases URL"""
+ base1 = "https://jkl.com/"
+ base2 = "ftp://mno.pqr/stu/"
+ path = "/wibble/wobble/"
+ qargs = {"X": "y"}
+ builder = UriBuilder(base1, **qargs)
+ url = builder.build(path)
+ self.assertEqual(
+ url,
+ "https://jkl.com/wibble/wobble/?X=y",
+ )
+ self.assertEqual(
+ builder.rebase(url, base2),
+ "ftp://mno.pqr/stu/wibble/wobble/",
+ )