From 43c72be56236655bd3b47764957285e93e542cfe Mon Sep 17 00:00:00 2001 From: GabrielChenCC <119652423+GabrielChenCC@users.noreply.github.com> Date: Fri, 19 Jan 2024 23:06:00 +0800 Subject: [PATCH 001/108] Update the rotation test cases to against the bug (bugfix) (#954) Update the rotation test cases to against the bug https://bugs.launchpad.net/sutton/+bug/2045249 --- providers/base/units/graphics/jobs.pxu | 3 ++- providers/base/units/suspend/suspend-graphics.pxu | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/providers/base/units/graphics/jobs.pxu b/providers/base/units/graphics/jobs.pxu index 34c2294303..d48f2b5a20 100644 --- a/providers/base/units/graphics/jobs.pxu +++ b/providers/base/units/graphics/jobs.pxu @@ -253,7 +253,8 @@ _description: This test will test display rotation on the {vendor} {product} graphics card STEPS: 1. Click "Test" to test display rotation. The display will be rotated every 4 seconds. - 2. Check if all rotations (normal right inverted left) took place without permanent screen corruption + 2. Try moving the mouse or try opening multiple terminals via ‘Ctrl+Alt+T’ every time the screen automatically turns. + 3. Check if all rotations (normal right inverted left) took place without permanent screen corruption. VERIFICATION: Did the display rotation take place without permanent screen corruption? diff --git a/providers/base/units/suspend/suspend-graphics.pxu b/providers/base/units/suspend/suspend-graphics.pxu index 4de9887c53..ece173a44f 100644 --- a/providers/base/units/suspend/suspend-graphics.pxu +++ b/providers/base/units/suspend/suspend-graphics.pxu @@ -304,6 +304,7 @@ _description: This test will test display rotation on the {{ vendor }} {{ product }} graphics card after suspend STEPS: 1. Click "Test" to test display rotation. The display will be rotated every 4 seconds. - 2. Check if all rotations (normal right inverted left) took place without permanent screen corruption + 2. 
Try moving the mouse or try opening multiple terminals via ‘Ctrl+Alt+T’ every time the screen automatically turns + 3. Check if all rotations (normal right inverted left) took place without permanent screen corruption VERIFICATION: Did the display rotation take place without permanent screen corruption after suspend? From 9fbbf7b47a5ea923acfbd4b75b365c804658628d Mon Sep 17 00:00:00 2001 From: eugene-yujinwu <117058060+eugene-yujinwu@users.noreply.github.com> Date: Fri, 19 Jan 2024 23:30:33 +0800 Subject: [PATCH 002/108] Fix the wrong file name from pactl.sh to pactl_list.sh (Bugfix) (#947) fix(provider-base): a typo made pactl_list.sh as pactl.sh --- providers/base/units/audio/jobs.pxu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/providers/base/units/audio/jobs.pxu b/providers/base/units/audio/jobs.pxu index 10c5a09e7e..da9e46929e 100644 --- a/providers/base/units/audio/jobs.pxu +++ b/providers/base/units/audio/jobs.pxu @@ -778,7 +778,7 @@ command: if check_audio_deamon.sh ; then pipewire_utils.py detect -t audio -c sinks else - pactl.sh sinks + pactl_list.sh sinks fi _description: Test to detect if there's available sources and sinks after suspending 30 times. 
From 0f68c70df86a5ab53895a0e0bb15edf8a495b513 Mon Sep 17 00:00:00 2001 From: Sylvain Pineau Date: Sat, 20 Jan 2024 19:43:29 +0100 Subject: [PATCH 003/108] Set inxi permissions (+x) in checkbox-ng debian packaging (BugFix) (#956) --- checkbox-ng/debian/rules | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/checkbox-ng/debian/rules b/checkbox-ng/debian/rules index 0d1fe1b9cd..d5a942b7c4 100755 --- a/checkbox-ng/debian/rules +++ b/checkbox-ng/debian/rules @@ -36,6 +36,12 @@ override_dh_clean: dh_clean rm -rf plainbox/impl/providers/categories/build +# Override dh_fixperms to ensure inxi is set as an executable (required for bionic builds) +override_dh_fixperms: + dh_fixperms + chmod 755 debian/python3-checkbox-ng/usr/lib/python3/dist-packages/plainbox/vendor/inxi + + # Drop the empty python-3.4 directory # Taken from https://wiki.debian.org/Python/LibraryStyleGuide override_dh_python3: From 7a6cb78c2065b6875ead987e5ae279b475564f34 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Mon, 22 Jan 2024 14:10:26 +0100 Subject: [PATCH 004/108] Fix unknown function in tests for checkbox-support (bugfix) (#958) * Fixed unknown method name in checkbox-support test * Called once with counts before filtering --- .../checkbox_support/snap_utils/tests/test_config.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/checkbox-support/checkbox_support/snap_utils/tests/test_config.py b/checkbox-support/checkbox_support/snap_utils/tests/test_config.py index 0d43332e70..9f4ff9b9b5 100644 --- a/checkbox-support/checkbox_support/snap_utils/tests/test_config.py +++ b/checkbox-support/checkbox_support/snap_utils/tests/test_config.py @@ -85,17 +85,17 @@ def test_smoke(self): m = mock_open() with patch('builtins.open', m): write_checkbox_conf({'foo': 'bar'}) - m().write.called_once_with('[environ]\n') - m().write.called_once_with('FOO = bar\n') - m().write.called_once_with('\n') + m().write.assert_called_with('[environ]\n') + m().write.assert_called_with('FOO = 
bar\n') + m().write.assert_called_with('\n') self.assertEqual(m().write.call_count, 3) def test_writes_empty(self): m = mock_open() with patch('builtins.open', m): write_checkbox_conf({}) - m().write.called_once_with('[environ]\n') - m().write.called_once_with('\n') + m().write.assert_called_with('[environ]\n') + m().write.assert_called_with('\n') self.assertEqual(m().write.call_count, 2) From 014cc1f4d92961ae9c7bc34d2db665e3143681cb Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Mon, 22 Jan 2024 14:11:44 +0100 Subject: [PATCH 005/108] Fixed SyntaxWarning correctly marking regex strings (bugfix) (#959) Fixed SyntaxWarning correctly marking regex strings --- .../checkbox_support/parsers/pactl.py | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/checkbox-support/checkbox_support/parsers/pactl.py b/checkbox-support/checkbox_support/parsers/pactl.py index 1786d2177d..98d765b56a 100644 --- a/checkbox-support/checkbox_support/parsers/pactl.py +++ b/checkbox-support/checkbox_support/parsers/pactl.py @@ -170,7 +170,7 @@ class Profile(Node): lambda t: t[0].rstrip(':') ).setResultsName("profile-name") + p.delimitedList( - p.Literal("(HDMI)") | p.Literal("(IEC958)") | p.Regex('[^ (\n]+'), + p.Literal("(HDMI)") | p.Literal("(IEC958)") | p.Regex(r'[^ (\n]+'), ' ', combine=True ).setResultsName('profile-label') + p.Suppress('(') @@ -229,7 +229,7 @@ class Port(Node): # anything other than a space and '(', delimited by a single # whitespace. 
+ p.delimitedList( - p.Regex('[^ (\n]+'), ' ', combine=True + p.Regex(r'[^ (\n]+'), ' ', combine=True ).setResultsName('port-label') + p.Suppress('(') + p.Optional( @@ -314,10 +314,10 @@ class PortWithProfile(Node): + p.Combine( p.OneOrMore( ~p.FollowedBy( - p.Regex('\(.+?\)') + p.Regex(r'\(.+?\)') + p.LineEnd() ) - + p.Regex('[^ \n]+') + + p.Regex(r'[^ \n]+') + p.White().suppress() ), ' ' @@ -384,13 +384,13 @@ class PortWithProfile(Node): # Non-collection attributes # ========================= -AttributeName = p.Regex("[a-zA-Z][^:\n]+").setResultsName("attribute-name") +AttributeName = p.Regex(r"[a-zA-Z][^:\n]+").setResultsName("attribute-name") ActivePortAttributeValue = ( p.Combine( p.Or([p.Literal('[Out] '), p.Literal('[In] ')]).suppress() - + p.Regex("[^\n]*") + + p.Regex(r"[^\n]*") + p.LineEnd().suppress(), adjacent=False ).setResultsName("attribute-value") @@ -402,27 +402,27 @@ class PortWithProfile(Node): p.Or([ p.Or([ p.Literal("(invalid)"), - p.Regex("([0-9]+: +[0-9]+% ?)+") + p.Regex(r"([0-9]+: +[0-9]+% ?)+") ]), p.Or([ p.Literal("(invalid)"), - p.Regex("([0-9]+: +[0-9]+% ?)+") + p.Regex(r"([0-9]+: +[0-9]+% ?)+") ]) + p.LineEnd() + p.Optional(p.White('\t').suppress()) + p.Or([ p.Literal("(invalid)"), - p.Regex("([0-9]+: -?([0-9]+\.[0-9]+|inf) dB ?)+"), + p.Regex(r"([0-9]+: -?([0-9]+\.[0-9]+|inf) dB ?)+"), ]), p.Or([ p.Literal("(invalid)"), - p.Regex("([\w\-]+: [0-9]+ / +[0-9]+%(?: /" + p.Regex(r"([\w\-]+: [0-9]+ / +[0-9]+%(?: /" " +-?([0-9]+\.[0-9]+|inf) dB)?,? 
*)+") ]) ]) + p.LineEnd() + p.Optional(p.White('\t').suppress()) - + p.Regex("balance -?[0-9]+\.[0-9]+") + + p.Regex(r"balance -?[0-9]+\.[0-9]+") + p.LineEnd(), adjacent=False ).setResultsName("attribute-value") @@ -431,10 +431,10 @@ class PortWithProfile(Node): BaseVolumeAttributeValue = ( p.Combine( - p.Regex("[0-9]+%") + p.Regex(r"[0-9]+%") + p.LineEnd() + p.Optional(p.White('\t').suppress()) - + p.Regex("-?[0-9]+\.[0-9]+ dB") + + p.Regex(r"-?[0-9]+\.[0-9]+ dB") + p.LineEnd(), adjacent=False ).setResultsName("attribute-value") @@ -442,7 +442,7 @@ class PortWithProfile(Node): SimpleAttributeValue = ( - p.Regex("[^\n]*").setResultsName("attribute-value") + p.Regex(r"[^\n]*").setResultsName("attribute-value") + p.LineEnd().suppress()) # simple values @@ -565,7 +565,7 @@ class Record(Node): __syntax__ = ( p.LineStart() + p.NotAny(p.White(' \t')) - + p.Regex("[A-Z][a-zA-Z ]+ #[0-9]+").setResultsName("record-name") + + p.Regex(r"[A-Z][a-zA-Z ]+ #[0-9]+").setResultsName("record-name") + p.LineEnd().suppress() + p.OneOrMore( p.Or([ From 8bfa77e7f742c24696a117fb18a46c5670eaadc9 Mon Sep 17 00:00:00 2001 From: Pierre Equoy Date: Mon, 22 Jan 2024 22:06:23 +0800 Subject: [PATCH 006/108] Rename ARM Vector Floating Point templates to avoid clash (BugFix) (#949) * Rename ARM Vector Floating Point template units to avoid clash These two templates use the same `id` which is a problem when ensuring every template has a unique id field. 
* Update test plans accordingly --- providers/base/units/cpu/jobs.pxu | 4 ++-- providers/base/units/cpu/test-plan.pxu | 8 +++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/providers/base/units/cpu/jobs.pxu b/providers/base/units/cpu/jobs.pxu index 9153a5d254..02fd3409b1 100644 --- a/providers/base/units/cpu/jobs.pxu +++ b/providers/base/units/cpu/jobs.pxu @@ -176,7 +176,7 @@ template-filter: cpuinfo.platform == 'armv7l' template-unit: job plugin: shell category_id: com.canonical.plainbox::cpu -id: cpu/arm_vfp_support_{platform} +id: cpu/armhf_vfp_support_{platform} estimated_duration: 1.0 user: root command: @@ -192,7 +192,7 @@ template-filter: cpuinfo.platform == 'aarch64' template-unit: job plugin: shell category_id: com.canonical.plainbox::cpu -id: cpu/arm_vfp_support_{platform} +id: cpu/arm64_vfp_support_{platform} estimated_duration: 1.0 user: root command: diff --git a/providers/base/units/cpu/test-plan.pxu b/providers/base/units/cpu/test-plan.pxu index 14a038c7ed..42a30fcf41 100644 --- a/providers/base/units/cpu/test-plan.pxu +++ b/providers/base/units/cpu/test-plan.pxu @@ -85,7 +85,8 @@ include: cpu/clocktest cpu/offlining_test cpu/topology - cpu/arm_vfp_support_.* + cpu/armhf_vfp_support_.* + cpu/arm64_vfp_support_.* cpu/cstates cpu/cstates_results.log @@ -99,6 +100,7 @@ include: cpu/maxfreq_test certification-status=blocker cpu/maxfreq_test-log-attach certification-status=non-blocker cpu/topology certification-status=blocker - cpu/arm_vfp_support_.* certification-status=non-blocker + cpu/armhf_vfp_support_.* certification-status=non-blocker + cpu/arm64_vfp_support_.* certification-status=non-blocker cpu/cstates certification-status=non-blocker - cpu/cstates_results.log certification-status=non-blocker \ No newline at end of file + cpu/cstates_results.log certification-status=non-blocker From 130710d1af8db04e98392ddb9b645d042201aa2f Mon Sep 17 00:00:00 2001 From: Fernando Bravo <39527354+fernando79513@users.noreply.github.com> Date: 
Mon, 22 Jan 2024 16:49:27 +0100 Subject: [PATCH 007/108] Renamed packaging.py to packaging_metadata.py (Bugfix) (#944) * Renamed packaging.py to packaging_metadata.py This change was made to avoid naming conflicts when importing the packaging module from packaging.py * Added tests for category outcome map * Formatted test state --- .../plainbox/impl/session/test_state.py | 552 +++++++++++------- .../{packaging.py => packaging_metadata.py} | 0 ...ackaging.py => test_packaging_metadata.py} | 6 +- checkbox-ng/plainbox/provider_manager.py | 4 +- checkbox-ng/pyproject.toml | 2 +- checkbox-ng/setup.cfg | 2 +- 6 files changed, 347 insertions(+), 219 deletions(-) rename checkbox-ng/plainbox/impl/unit/{packaging.py => packaging_metadata.py} (100%) rename checkbox-ng/plainbox/impl/unit/{test_packaging.py => test_packaging_metadata.py} (98%) diff --git a/checkbox-ng/plainbox/impl/session/test_state.py b/checkbox-ng/plainbox/impl/session/test_state.py index 92ab82466a..839e294cd9 100644 --- a/checkbox-ng/plainbox/impl/session/test_state.py +++ b/checkbox-ng/plainbox/impl/session/test_state.py @@ -42,24 +42,25 @@ from plainbox.impl.session.state import SessionMetaData from plainbox.impl.testing_utils import make_job from plainbox.impl.unit.job import JobDefinition -from plainbox.impl.unit.unit import Unit +from plainbox.impl.unit.category import CategoryUnit + from plainbox.impl.unit.unit_with_id import UnitWithId from plainbox.vendor import mock from plainbox.vendor.morris import SignalTestCase def load_tests(loader, tests, ignore): - tests.addTests(DocTestSuite( - 'plainbox.impl.session.state', optionflags=REPORT_NDIFF)) + tests.addTests( + DocTestSuite("plainbox.impl.session.state", optionflags=REPORT_NDIFF) + ) return tests class SessionStateSmokeTests(TestCase): - def setUp(self): - A = make_job('A', requires='R.attr == "value"') - B = make_job('B', depends='C') - C = make_job('C') + A = make_job("A", requires='R.attr == "value"') + B = make_job("B", depends="C") + C = 
make_job("C") self.job_list = [A, B, C] self.session_state = SessionState(self.job_list) @@ -79,7 +80,7 @@ def test_initial_run_list(self): self.assertEqual(expected, observed) def test_update_mandatory_job_list_updates(self): - D = make_job('D') + D = make_job("D") self.session_state.update_mandatory_job_list([D]) expected = [D] observed = self.session_state.mandatory_job_list @@ -90,7 +91,7 @@ class RegressionTests(TestCase): # Tests for bugfixes def test_crash_on_missing_job(self): - """ http://pad.lv/1334296 """ + """http://pad.lv/1334296""" A = make_job("A") state = SessionState([]) problems = state.update_desired_job_list([A]) @@ -100,8 +101,8 @@ def test_crash_on_missing_job(self): def test_crash_in_update_desired_job_list(self): # This checks if a DependencyError can cause crash # update_desired_job_list() with a ValueError, in certain conditions. - A = make_job('A', depends='X') - L = make_job('L', plugin='shell') + A = make_job("A", depends="X") + L = make_job("L", plugin="shell") session = SessionState([A, L]) problems = session.update_desired_job_list([A, L]) # We should get exactly one DependencyMissingError related to job A and @@ -131,39 +132,81 @@ def test_init_with_colliding_jobs(self): self.assertIs(call.exception.affected_job, different_A) def test_dont_remove_missing_jobs(self): - """ http://pad.lv/1444126 """ + """http://pad.lv/1444126""" A = make_job("A", depends="B") B = make_job("B", depends="C") state = SessionState([A, B]) problems = state.update_desired_job_list([A, B]) - self.assertEqual(problems, [ - DependencyMissingError(B, 'C', 'direct'), - DependencyMissingError(A, 'B', 'direct'), - ]) + self.assertEqual( + problems, + [ + DependencyMissingError(B, "C", "direct"), + DependencyMissingError(A, "B", "direct"), + ], + ) self.assertEqual(state.desired_job_list, []) self.assertEqual(state.run_list, []) + def test_category_outcome_map(self): + cat_a = CategoryUnit({"id": "a", "name": "The a category"}) + job_a = JobDefinition({"id": 
"a_job", "category_id": "a"}) + state = SessionState([cat_a, job_a]) + + # Test different outcomes for a single category + result_pass = MemoryJobResult({"outcome": IJobResult.OUTCOME_PASS}) + state.update_job_result(job_a, result_pass) + self.assertEqual( + state.category_outcome_map, + {"a": IJobResult.OUTCOME_PASS}, + ) + + result_fail = MemoryJobResult({"outcome": IJobResult.OUTCOME_FAIL}) + state.update_job_result(job_a, result_fail) + self.assertEqual( + state.category_outcome_map, + {"a": IJobResult.OUTCOME_FAIL}, + ) + + result_skip = MemoryJobResult({"outcome": IJobResult.OUTCOME_SKIP}) + state.update_job_result(job_a, result_skip) + self.assertEqual( + state.category_outcome_map, + {"a": IJobResult.OUTCOME_SKIP}, + ) + + # Test different outcomes for non valid jobs + job_res = JobDefinition( + {"id": "resource", "plugin": "resource", "category_id": "a"} + ) + job_b = JobDefinition({"id": "b_job", "category_id": "b"}) + state = SessionState([cat_a, job_a, job_res, job_b]) + + state.update_job_result(job_a, result_pass) + self.assertEqual( + state.category_outcome_map, + {"a": IJobResult.OUTCOME_PASS}, + ) -class SessionStateAPITests(TestCase): +class SessionStateAPITests(TestCase): def test_set_resource_list(self): # Define an empty session session = SessionState([]) # Define a resource - old_res = Resource({'attr': 'old value'}) + old_res = Resource({"attr": "old value"}) # Set the resource list with the old resource # So here the old result is stored into a new 'R' resource - session.set_resource_list('R', [old_res]) + session.set_resource_list("R", [old_res]) # Ensure that it worked - self.assertEqual(session._resource_map, {'R': [old_res]}) + self.assertEqual(session._resource_map, {"R": [old_res]}) # Define another resource - new_res = Resource({'attr': 'new value'}) + new_res = Resource({"attr": "new value"}) # Now we present the second result for the same job - session.set_resource_list('R', [new_res]) + session.set_resource_list("R", [new_res]) # 
What should happen here is that the R resource is entirely replaced # by the data from the new result. The data should not be merged or # appended in any way. - self.assertEqual(session._resource_map, {'R': [new_res]}) + self.assertEqual(session._resource_map, {"R": [new_res]}) def test_add_unit(self): # Define a job @@ -183,7 +226,8 @@ def test_add_unit(self): # The job is not selected to run self.assertEqual( session.job_state_map[job.id].readiness_inhibitor_list, - [UndesiredJobReadinessInhibitor]) + [UndesiredJobReadinessInhibitor], + ) def test_add_unit_duplicate_job(self): # Define a job @@ -216,7 +260,7 @@ def test_add_unit_clashing_job(self): # The job got added to job list self.assertIn(job, session.job_list) # Define a different job that clashes with the initial job - clashing_job = make_job("A", plugin='other') + clashing_job = make_job("A", plugin="other") self.assertNotEqual(job, clashing_job) self.assertEqual(job.id, clashing_job.id) # Try adding it to the session @@ -241,8 +285,8 @@ def test_add_sibling_unit(self): # Both jobs got added to job list self.assertEqual(len(session.job_list), 2) self.assertIn(job, session.job_list) - self.assertEqual(session.job_list[1].id, 'B') - self.assertEqual(session.job_list[1].summary, 'foo') + self.assertEqual(session.job_list[1].id, "B") + self.assertEqual(session.job_list[1].summary, "foo") sibling = session.job_list[1] # Both jobs got added to job state map self.assertIs(session.job_state_map[job.id].job, job) @@ -256,14 +300,16 @@ def test_add_sibling_unit(self): # Both jobs are not selected to run self.assertEqual( session.job_state_map[job.id].readiness_inhibitor_list, - [UndesiredJobReadinessInhibitor]) + [UndesiredJobReadinessInhibitor], + ) self.assertEqual( session.job_state_map[sibling.id].readiness_inhibitor_list, - [UndesiredJobReadinessInhibitor]) + [UndesiredJobReadinessInhibitor], + ) def test_also_after_suspend_flag(self): # Define a job - job = make_job("A", summary="foo", 
flags='also-after-suspend') + job = make_job("A", summary="foo", flags="also-after-suspend") # Define an empty session session = SessionState([]) # Add the job to the session @@ -271,14 +317,14 @@ def test_also_after_suspend_flag(self): # Both jobs got added to job list self.assertEqual(len(session.job_list), 2) self.assertIn(job, session.job_list) - self.assertEqual(session.job_list[1].id, 'after-suspend-A') - self.assertEqual(session.job_list[1].summary, 'foo after suspend (S3)') + self.assertEqual(session.job_list[1].id, "after-suspend-A") + self.assertEqual(session.job_list[1].summary, "foo after suspend (S3)") self.assertEqual( session.job_list[1].depends, - ('A com.canonical.certification::suspend/' - 'suspend_advanced_auto')) + ("A com.canonical.certification::suspend/suspend_advanced_auto"), + ) sibling = session.job_list[1] - self.assertNotIn('also-after-suspend', sibling.get_flag_set()) + self.assertNotIn("also-after-suspend", sibling.get_flag_set()) # Both jobs got added to job state map self.assertIs(session.job_state_map[job.id].job, job) self.assertIs(session.job_state_map[sibling.id].job, sibling) @@ -291,14 +337,16 @@ def test_also_after_suspend_flag(self): # Both jobs are not selected to run self.assertEqual( session.job_state_map[job.id].readiness_inhibitor_list, - [UndesiredJobReadinessInhibitor]) + [UndesiredJobReadinessInhibitor], + ) self.assertEqual( session.job_state_map[sibling.id].readiness_inhibitor_list, - [UndesiredJobReadinessInhibitor]) + [UndesiredJobReadinessInhibitor], + ) def test_also_after_suspend_manual_flag(self): # Define a job - job = make_job("A", summary="foo", flags='also-after-suspend-manual') + job = make_job("A", summary="foo", flags="also-after-suspend-manual") # Define an empty session session = SessionState([]) # Add the job to the session @@ -306,13 +354,14 @@ def test_also_after_suspend_manual_flag(self): # Both jobs got added to job list self.assertEqual(len(session.job_list), 2) self.assertIn(job, 
session.job_list) - self.assertEqual(session.job_list[1].id, 'after-suspend-manual-A') - self.assertEqual(session.job_list[1].summary, 'foo after suspend (S3)') + self.assertEqual(session.job_list[1].id, "after-suspend-manual-A") + self.assertEqual(session.job_list[1].summary, "foo after suspend (S3)") self.assertEqual( session.job_list[1].depends, - 'A com.canonical.certification::suspend/suspend_advanced') + "A com.canonical.certification::suspend/suspend_advanced", + ) sibling = session.job_list[1] - self.assertNotIn('also-after-suspend-manual', sibling.get_flag_set()) + self.assertNotIn("also-after-suspend-manual", sibling.get_flag_set()) # Both jobs got added to job state map self.assertIs(session.job_state_map[job.id].job, job) self.assertIs(session.job_state_map[sibling.id].job, sibling) @@ -325,66 +374,86 @@ def test_also_after_suspend_manual_flag(self): # Both jobs are not selected to run self.assertEqual( session.job_state_map[job.id].readiness_inhibitor_list, - [UndesiredJobReadinessInhibitor]) + [UndesiredJobReadinessInhibitor], + ) self.assertEqual( session.job_state_map[sibling.id].readiness_inhibitor_list, - [UndesiredJobReadinessInhibitor]) + [UndesiredJobReadinessInhibitor], + ) def test_get_estimated_duration_auto(self): # Define jobs with an estimated duration - one_second = make_job("one_second", plugin="shell", - command="foobar", - estimated_duration=1.0) - half_second = make_job("half_second", plugin="shell", - command="barfoo", - estimated_duration=0.5) + one_second = make_job( + "one_second", + plugin="shell", + command="foobar", + estimated_duration=1.0, + ) + half_second = make_job( + "half_second", + plugin="shell", + command="barfoo", + estimated_duration=0.5, + ) session = SessionState([one_second, half_second]) session.update_desired_job_list([one_second, half_second]) self.assertEqual(session.get_estimated_duration(), (1.5, 0.0)) def test_get_estimated_duration_manual(self): - two_seconds = make_job("two_seconds", plugin="manual", - 
command="farboo", - estimated_duration=2.0) - shell_job = make_job("shell_job", plugin="shell", - command="boofar", - estimated_duration=0.6) + two_seconds = make_job( + "two_seconds", + plugin="manual", + command="farboo", + estimated_duration=2.0, + ) + shell_job = make_job( + "shell_job", + plugin="shell", + command="boofar", + estimated_duration=0.6, + ) session = SessionState([two_seconds, shell_job]) session.update_desired_job_list([two_seconds, shell_job]) self.assertEqual(session.get_estimated_duration(), (0.6, 32.0)) def test_get_estimated_duration_automated_unknown(self): - three_seconds = make_job("three_seconds", plugin="shell", - command="frob", - estimated_duration=3.0) - no_estimated_duration = make_job("no_estimated_duration", - plugin="shell", - command="borf") + three_seconds = make_job( + "three_seconds", + plugin="shell", + command="frob", + estimated_duration=3.0, + ) + no_estimated_duration = make_job( + "no_estimated_duration", plugin="shell", command="borf" + ) session = SessionState([three_seconds, no_estimated_duration]) session.update_desired_job_list([three_seconds, no_estimated_duration]) self.assertEqual(session.get_estimated_duration(), (None, 0.0)) def test_get_estimated_duration_manual_unknown(self): - four_seconds = make_job("four_seconds", plugin="shell", - command="fibble", - estimated_duration=4.0) - no_estimated_duration = make_job("no_estimated_duration", - plugin="user-verify", - command="bibble") + four_seconds = make_job( + "four_seconds", + plugin="shell", + command="fibble", + estimated_duration=4.0, + ) + no_estimated_duration = make_job( + "no_estimated_duration", plugin="user-verify", command="bibble" + ) session = SessionState([four_seconds, no_estimated_duration]) session.update_desired_job_list([four_seconds, no_estimated_duration]) self.assertEqual(session.get_estimated_duration(), (4.0, None)) def test_update_mandatory_job_list_affects_run_list(self): - A = make_job('A') + A = make_job("A") session = 
SessionState([A]) session.update_mandatory_job_list([A]) session.update_desired_job_list([]) self.assertEqual(session.run_list, [A]) def test_mandatory_jobs_are_first_in_run_list(self): - A = make_job('A') - B = make_job('B') + A = make_job("A") + B = make_job("B") session = SessionState([A, B]) session.update_mandatory_job_list([B]) session.update_desired_job_list([A]) @@ -400,7 +469,8 @@ def test_system_information_collection_called(self): ) as collect_system_information_mock: return_value = getter(self_mock) self.assertEqual( - return_value, collect_system_information_mock.return_value + return_value, + collect_system_information_mock.return_value, ) def test_system_information_collection_cached(self): @@ -431,7 +501,7 @@ class SessionStateTrimTests(TestCase): def setUp(self): self.job_a = make_job("a") self.job_b = make_job("b") - self.origin = mock.Mock(name='origin', spec_set=Origin) + self.origin = mock.Mock(name="origin", spec_set=Origin) self.session = SessionState([self.job_a, self.job_b]) def test_trim_does_remove_jobs(self): @@ -453,7 +523,7 @@ def test_trim_does_remove_resources(self): """ verify that trim_job_list() removes resources for removed jobs """ - self.session.set_resource_list("a", [Resource({'attr': 'value'})]) + self.session.set_resource_list("a", [Resource({"attr": "value"})]) self.assertIn("a", self.session.resource_map) self.session.trim_job_list(JobIdQualifier("a", self.origin)) self.assertNotIn("a", self.session.resource_map) @@ -468,6 +538,7 @@ def on_job_removed(job): self.assertIs(job, self.job_a) nonlocal signal_fired signal_fired = True + self.session.on_job_removed.connect(on_job_removed) self.session.trim_job_list(JobIdQualifier("a", self.origin)) self.assertTrue(signal_fired) @@ -481,6 +552,7 @@ def test_trim_fires_on_job_state_map_changed(self): def on_job_state_map_changed(): nonlocal signal_fired signal_fired = True + self.session.on_job_state_map_changed.connect(on_job_state_map_changed) 
self.session.trim_job_list(JobIdQualifier("a", self.origin)) self.assertTrue(signal_fired) @@ -495,6 +567,7 @@ def test_trim_fires_on_job_state_map_changed_only_when_needed(self): def on_job_state_map_changed(): nonlocal signal_fired signal_fired = True + self.session.on_job_state_map_changed.connect(on_job_state_map_changed) self.session.trim_job_list(JobIdQualifier("x", self.origin)) self.assertFalse(signal_fired) @@ -508,8 +581,8 @@ def test_trim_raises_ValueError_for_jobs_on_run_list(self): with self.assertRaises(ValueError) as boom: self.session.trim_job_list(JobIdQualifier("a", self.origin)) self.assertEqual( - str(boom.exception), - "cannot remove jobs that are on the run list: a") + str(boom.exception), "cannot remove jobs that are on the run list: a" + ) class SessionStateReactionToJobResultTests(TestCase): @@ -529,10 +602,9 @@ def setUp(self): self.job_A = make_job("A", requires="R.attr == 'value'") self.job_A_expr = self.job_A.get_resource_program().expression_list[0] self.job_R = make_job("R", plugin="resource") - self.job_X = make_job("X", depends='Y') + self.job_X = make_job("X", depends="Y") self.job_Y = make_job("Y") - self.job_list = [ - self.job_A, self.job_R, self.job_X, self.job_Y] + self.job_list = [self.job_A, self.job_R, self.job_X, self.job_Y] self.session = SessionState(self.job_list) def job_state(self, id): @@ -554,18 +626,26 @@ def test_assumptions(self): self.assertEqual(self.session.desired_job_list, []) # All jobs have state objects that indicate they cannot run (because # they have the UNDESIRED inhibitor set for them by default). 
- self.assertFalse(self.job_state('A').can_start()) - self.assertFalse(self.job_state('R').can_start()) - self.assertFalse(self.job_state('X').can_start()) - self.assertFalse(self.job_state('Y').can_start()) - self.assertEqual(self.job_inhibitor('A', 0).cause, - InhibitionCause.UNDESIRED) - self.assertEqual(self.job_inhibitor('R', 0).cause, - InhibitionCause.UNDESIRED) - self.assertEqual(self.job_inhibitor('X', 0).cause, - InhibitionCause.UNDESIRED) - self.assertEqual(self.job_inhibitor('Y', 0).cause, - InhibitionCause.UNDESIRED) + self.assertFalse(self.job_state("A").can_start()) + self.assertFalse(self.job_state("R").can_start()) + self.assertFalse(self.job_state("X").can_start()) + self.assertFalse(self.job_state("Y").can_start()) + self.assertEqual( + self.job_inhibitor("A", 0).cause, + InhibitionCause.UNDESIRED, + ) + self.assertEqual( + self.job_inhibitor("R", 0).cause, + InhibitionCause.UNDESIRED, + ) + self.assertEqual( + self.job_inhibitor("X", 0).cause, + InhibitionCause.UNDESIRED, + ) + self.assertEqual( + self.job_inhibitor("Y", 0).cause, + InhibitionCause.UNDESIRED, + ) def test_desire_job_A_updates_state_map(self): # This function checks what happens when the job A becomes desired via @@ -580,35 +660,40 @@ def test_desire_job_A_updates_state_map(self): self.assertEqual(self.session.run_list, [self.job_R, self.job_A]) # This also recomputes job readiness state so that job R is no longer # undesired, has no other inhibitor and thus can start - self.assertEqual(self.job_state('R').readiness_inhibitor_list, []) - self.assertTrue(self.job_state('R').can_start()) + self.assertEqual(self.job_state("R").readiness_inhibitor_list, []) + self.assertTrue(self.job_state("R").can_start()) # While the A job still cannot run it now has a different inhibitor, # one with the PENDING_RESOURCE cause. The inhibitor also properly # pinpoints the related job and related expression. 
- self.assertNotEqual(self.job_state('A').readiness_inhibitor_list, []) - self.assertEqual(self.job_inhibitor('A', 0).cause, - InhibitionCause.PENDING_RESOURCE) - self.assertEqual(self.job_inhibitor('A', 0).related_job, self.job_R) - self.assertEqual(self.job_inhibitor('A', 0).related_expression, - self.job_A_expr) - self.assertFalse(self.job_state('A').can_start()) + self.assertNotEqual(self.job_state("A").readiness_inhibitor_list, []) + self.assertEqual( + self.job_inhibitor("A", 0).cause, InhibitionCause.PENDING_RESOURCE + ) + self.assertEqual(self.job_inhibitor("A", 0).related_job, self.job_R) + self.assertEqual( + self.job_inhibitor("A", 0).related_expression, + self.job_A_expr, + ) + self.assertFalse(self.job_state("A").can_start()) def test_resource_job_result_updates_resource_and_job_states(self): # This function checks what happens when a JobResult for job R (which # is a resource job via the resource plugin) is presented to the # session. - result_R = MemoryJobResult({ - 'outcome': IJobResult.OUTCOME_PASS, - 'io_log': [(0, 'stdout', b"attr: value\n")], - }) + result_R = MemoryJobResult( + { + "outcome": IJobResult.OUTCOME_PASS, + "io_log": [(0, "stdout", b"attr: value\n")], + } + ) self.session.update_job_result(self.job_R, result_R) # The most obvious thing that can happen, is that the result is simply # stored in the associated job state object. - self.assertIs(self.job_state('R').result, result_R) + self.assertIs(self.job_state("R").result, result_R) # Initially the _resource_map was empty. SessionState parses the io_log # of results of resource jobs and creates appropriate resource objects. self.assertIn("R", self.session._resource_map) - expected = {'R': [Resource({'attr': 'value'})]} + expected = {"R": [Resource({"attr": "value"})]} self.assertEqual(self.session._resource_map, expected) # As job results are presented to the session the readiness of other # jobs is changed. 
Since A depends on R via a resource expression and @@ -617,12 +702,14 @@ def test_resource_job_result_updates_resource_and_job_states(self): # have been removed. Since this test does not use # update_desired_job_list() a will still have the UNDESIRED inhibitor # but it will no longer have the PENDING_RESOURCE inhibitor, - self.assertEqual(self.job_inhibitor('A', 0).cause, - InhibitionCause.UNDESIRED) + self.assertEqual( + self.job_inhibitor("A", 0).cause, + InhibitionCause.UNDESIRED, + ) # Now if we put A on the desired list this should clear the UNDESIRED # inhibitor and make A runnable. self.session.update_desired_job_list([self.job_A]) - self.assertTrue(self.job_state('A').can_start()) + self.assertTrue(self.job_state("A").can_start()) def test_normal_job_result_updates(self): # This function checks what happens when a JobResult for job A is @@ -630,35 +717,39 @@ def test_normal_job_result_updates(self): # the initial job result was pretty much identical and the comparison # below would fail to work as the update would have been silently # ignored. - result_A = MemoryJobResult({'outcome': 'different'}) + result_A = MemoryJobResult({"outcome": "different"}) self.session.update_job_result(self.job_A, result_A) # As before the result should be stored as-is - self.assertIs(self.job_state('A').result, result_A) + self.assertIs(self.job_state("A").result, result_A) # Unlike before _resource_map should be left unchanged self.assertEqual(self.session._resource_map, {}) # One interesting observation is that readiness inhibitors are entirely # unaffected by existing test results beyond dependency and resource # relationships. While a result for job A was presented, job A is still # inhibited by the UNDESIRED inhibitor. 
- self.assertEqual(self.job_inhibitor('A', 0).cause, - InhibitionCause.UNDESIRED) + self.assertEqual( + self.job_inhibitor("A", 0).cause, + InhibitionCause.UNDESIRED, + ) - @mock.patch('plainbox.impl.ctrl.logger') + @mock.patch("plainbox.impl.ctrl.logger") def test_resource_job_with_broken_output(self, mock_logger): # This function checks how SessionState parses partially broken # resource jobs. A JobResult with broken output is constructed below. # The output will describe one proper record, one broken record and # another proper record in that order. - result_R = MemoryJobResult({ - 'outcome': IJobResult.OUTCOME_PASS, - 'io_log': [ - (0, 'stdout', b"attr: value-1\n"), - (1, 'stdout', b"\n"), - (1, 'stdout', b"I-sound-like-a-broken-record\n"), - (1, 'stdout', b"\n"), - (1, 'stdout', b"attr: value-2\n") - ], - }) + result_R = MemoryJobResult( + { + "outcome": IJobResult.OUTCOME_PASS, + "io_log": [ + (0, "stdout", b"attr: value-1\n"), + (1, "stdout", b"\n"), + (1, "stdout", b"I-sound-like-a-broken-record\n"), + (1, "stdout", b"\n"), + (1, "stdout", b"attr: value-2\n"), + ], + } + ) # Since we cannot control the output of scripts and people indeed make # mistakes a warning is issued but no exception is raised to the # caller. @@ -667,15 +758,19 @@ def test_resource_job_with_broken_output(self, mock_logger): # in away which would allow for recovery. Out of all the output only # the first record is created and stored properly. The third, proper # record is entirely ignored. 
- expected = {'R': [Resource({'attr': 'value-1'})]} + expected = {"R": [Resource({"attr": "value-1"})]} self.assertEqual(self.session._resource_map, expected) # Make sure the right warning was logged mock_logger.warning.assert_called_once_with( "local script %s returned invalid RFC822 data: %s", - self.job_R.id, RFC822SyntaxError(None, 3, - "Unexpected non-empty line: " - "'I-sound-like-a-broken-record\\n'")) + self.job_R.id, + RFC822SyntaxError( + None, + 3, + "Unexpected non-empty line: 'I-sound-like-a-broken-record\\n'", + ), + ) def test_desire_job_X_updates_state_map(self): # This function checks what happens when the job X becomes desired via @@ -688,15 +783,17 @@ def test_desire_job_X_updates_state_map(self): self.assertEqual(self.session.run_list, [self.job_Y, self.job_X]) # As in the A - R test above this also recomputes the job readiness # state. Job Y is now runnable but job X has a PENDING_DEP inhibitor. - self.assertEqual(self.job_state('Y').readiness_inhibitor_list, []) + self.assertEqual(self.job_state("Y").readiness_inhibitor_list, []) # While the A job still cannot run it now has a different inhibitor, # one with the PENDING_RESOURCE cause. The inhibitor also properly # pinpoints the related job and related expression. 
- self.assertNotEqual(self.job_state('X').readiness_inhibitor_list, []) - self.assertEqual(self.job_inhibitor('X', 0).cause, - InhibitionCause.PENDING_DEP) - self.assertEqual(self.job_inhibitor('X', 0).related_job, self.job_Y) - self.assertFalse(self.job_state('X').can_start()) + self.assertNotEqual(self.job_state("X").readiness_inhibitor_list, []) + self.assertEqual( + self.job_inhibitor("X", 0).cause, + InhibitionCause.PENDING_DEP, + ) + self.assertEqual(self.job_inhibitor("X", 0).related_job, self.job_Y) + self.assertFalse(self.job_state("X").can_start()) def test_desired_job_X_cannot_run_with_failed_job_Y(self): # This function checks how SessionState reacts when the desired job X @@ -704,111 +801,133 @@ def test_desired_job_X_cannot_run_with_failed_job_Y(self): self.session.update_desired_job_list([self.job_X]) # When X is desired, as above, it should be inhibited with PENDING_DEP # on Y - self.assertNotEqual(self.job_state('X').readiness_inhibitor_list, []) - self.assertEqual(self.job_inhibitor('X', 0).cause, - InhibitionCause.PENDING_DEP) - self.assertEqual(self.job_inhibitor('X', 0).related_job, self.job_Y) - self.assertFalse(self.job_state('X').can_start()) + self.assertNotEqual(self.job_state("X").readiness_inhibitor_list, []) + self.assertEqual( + self.job_inhibitor("X", 0).cause, + InhibitionCause.PENDING_DEP, + ) + self.assertEqual(self.job_inhibitor("X", 0).related_job, self.job_Y) + self.assertFalse(self.job_state("X").can_start()) # When a failed Y result is presented X should switch to FAILED_DEP - result_Y = MemoryJobResult({'outcome': IJobResult.OUTCOME_FAIL}) + result_Y = MemoryJobResult({"outcome": IJobResult.OUTCOME_FAIL}) self.session.update_job_result(self.job_Y, result_Y) # Now job X should have a FAILED_DEP inhibitor instead of the # PENDING_DEP it had before. Everything else should stay as-is. 
- self.assertNotEqual(self.job_state('X').readiness_inhibitor_list, []) - self.assertEqual(self.job_inhibitor('X', 0).cause, - InhibitionCause.FAILED_DEP) - self.assertEqual(self.job_inhibitor('X', 0).related_job, self.job_Y) - self.assertFalse(self.job_state('X').can_start()) + self.assertNotEqual(self.job_state("X").readiness_inhibitor_list, []) + self.assertEqual( + self.job_inhibitor("X", 0).cause, + InhibitionCause.FAILED_DEP, + ) + self.assertEqual(self.job_inhibitor("X", 0).related_job, self.job_Y) + self.assertFalse(self.job_state("X").can_start()) def test_desired_job_X_can_run_with_passing_job_Y(self): # A variant of the test case above, simply Y passes this time, making X # runnable self.session.update_desired_job_list([self.job_X]) - result_Y = MemoryJobResult({'outcome': IJobResult.OUTCOME_PASS}) + result_Y = MemoryJobResult({"outcome": IJobResult.OUTCOME_PASS}) self.session.update_job_result(self.job_Y, result_Y) # Now X is runnable - self.assertEqual(self.job_state('X').readiness_inhibitor_list, []) - self.assertTrue(self.job_state('X').can_start()) + self.assertEqual(self.job_state("X").readiness_inhibitor_list, []) + self.assertTrue(self.job_state("X").can_start()) def test_desired_job_X_cannot_run_with_no_resource_R(self): # A variant of the two test cases above, using A-R jobs self.session.update_desired_job_list([self.job_A]) - result_R = MemoryJobResult({ - 'outcome': IJobResult.OUTCOME_PASS, - 'io_log': [(0, 'stdout', b'attr: wrong value\n')], - }) + result_R = MemoryJobResult( + { + "outcome": IJobResult.OUTCOME_PASS, + "io_log": [(0, "stdout", b"attr: wrong value\n")], + } + ) self.session.update_job_result(self.job_R, result_R) # Now A is inhibited by FAILED_RESOURCE - self.assertNotEqual(self.job_state('A').readiness_inhibitor_list, []) - self.assertEqual(self.job_inhibitor('A', 0).cause, - InhibitionCause.FAILED_RESOURCE) - self.assertEqual(self.job_inhibitor('A', 0).related_job, self.job_R) - self.assertEqual(self.job_inhibitor('A', 
0).related_expression, - self.job_A_expr) - self.assertFalse(self.job_state('A').can_start()) + self.assertNotEqual(self.job_state("A").readiness_inhibitor_list, []) + self.assertEqual( + self.job_inhibitor("A", 0).cause, InhibitionCause.FAILED_RESOURCE + ) + self.assertEqual(self.job_inhibitor("A", 0).related_job, self.job_R) + self.assertEqual( + self.job_inhibitor("A", 0).related_expression, + self.job_A_expr, + ) + self.assertFalse(self.job_state("A").can_start()) def test_resource_job_result_overwrites_old_resources(self): # This function checks what happens when a JobResult for job R is # presented to a session that has some resources from that job already. - result_R_old = MemoryJobResult({ - 'outcome': IJobResult.OUTCOME_PASS, - 'io_log': [(0, 'stdout', b"attr: old value\n")] - }) + result_R_old = MemoryJobResult( + { + "outcome": IJobResult.OUTCOME_PASS, + "io_log": [(0, "stdout", b"attr: old value\n")], + } + ) self.session.update_job_result(self.job_R, result_R_old) # So here the old result is stored into a new 'R' resource - expected_before = {'R': [Resource({'attr': 'old value'})]} + expected_before = {"R": [Resource({"attr": "old value"})]} self.assertEqual(self.session._resource_map, expected_before) # Now we present the second result for the same job - result_R_new = MemoryJobResult({ - 'outcome': IJobResult.OUTCOME_PASS, - 'io_log': [(0, 'stdout', b"attr: new value\n")] - }) + result_R_new = MemoryJobResult( + { + "outcome": IJobResult.OUTCOME_PASS, + "io_log": [(0, "stdout", b"attr: new value\n")], + } + ) self.session.update_job_result(self.job_R, result_R_new) # What should happen here is that the R resource is entirely replaced # by the data from the new result. The data should not be merged or # appended in any way. 
- expected_after = {'R': [Resource({'attr': 'new value'})]} + expected_after = {"R": [Resource({"attr": "new value"})]} self.assertEqual(self.session._resource_map, expected_after) def test_get_outcome_stats(self): - result_A = MemoryJobResult({'outcome': IJobResult.OUTCOME_PASS}) - result_R = MemoryJobResult({'outcome': IJobResult.OUTCOME_FAIL}) - result_Y = MemoryJobResult({'outcome': IJobResult.OUTCOME_FAIL}) + result_A = MemoryJobResult({"outcome": IJobResult.OUTCOME_PASS}) + result_R = MemoryJobResult({"outcome": IJobResult.OUTCOME_FAIL}) + result_Y = MemoryJobResult({"outcome": IJobResult.OUTCOME_FAIL}) self.session.update_job_result(self.job_A, result_A) self.session.update_job_result(self.job_R, result_R) self.session.update_job_result(self.job_Y, result_Y) - self.assertEqual(self.session.get_outcome_stats(), - {IJobResult.OUTCOME_PASS: 1, - IJobResult.OUTCOME_FAIL: 2}) + self.assertEqual( + self.session.get_outcome_stats(), + {IJobResult.OUTCOME_PASS: 1, IJobResult.OUTCOME_FAIL: 2}, + ) def test_get_certification_status_map(self): - result_A = MemoryJobResult({'outcome': IJobResult.OUTCOME_PASS}) + result_A = MemoryJobResult({"outcome": IJobResult.OUTCOME_PASS}) self.session.update_job_result(self.job_A, result_A) - self.session.job_state_map[ - self.job_A.id].effective_certification_status = 'foo' + self.session.job_state_map[self.job_A.id].effective_certification_status = "foo" self.assertEqual(self.session.get_certification_status_map(), {}) - self.assertEqual(self.session.get_certification_status_map( - outcome_filter=(IJobResult.OUTCOME_PASS,), - certification_status_filter=('foo',)), - {self.job_A.id: self.session.job_state_map[self.job_A.id]}) - result_Y = MemoryJobResult({'outcome': IJobResult.OUTCOME_FAIL}) - self.session.job_state_map[ - self.job_Y.id].effective_certification_status = 'bar' + self.assertEqual( + self.session.get_certification_status_map( + outcome_filter=(IJobResult.OUTCOME_PASS,), + certification_status_filter=("foo",), + ), + 
{self.job_A.id: self.session.job_state_map[self.job_A.id]}, + ) + result_Y = MemoryJobResult({"outcome": IJobResult.OUTCOME_FAIL}) + self.session.job_state_map[self.job_Y.id].effective_certification_status = "bar" self.assertEqual(self.session.get_certification_status_map(), {}) - self.assertEqual(self.session.get_certification_status_map( - outcome_filter=(IJobResult.OUTCOME_PASS, IJobResult.OUTCOME_FAIL), - certification_status_filter=('foo', 'bar')), - {self.job_A.id: self.session.job_state_map[self.job_A.id]}) + self.assertEqual( + self.session.get_certification_status_map( + outcome_filter=(IJobResult.OUTCOME_PASS, IJobResult.OUTCOME_FAIL), + certification_status_filter=("foo", "bar"), + ), + {self.job_A.id: self.session.job_state_map[self.job_A.id]}, + ) self.session.update_job_result(self.job_Y, result_Y) - self.assertEqual(self.session.get_certification_status_map( - outcome_filter=(IJobResult.OUTCOME_PASS, IJobResult.OUTCOME_FAIL), - certification_status_filter=('foo', 'bar')), - {self.job_A.id: self.session.job_state_map[self.job_A.id], - self.job_Y.id: self.session.job_state_map[self.job_Y.id]}) + self.assertEqual( + self.session.get_certification_status_map( + outcome_filter=(IJobResult.OUTCOME_PASS, IJobResult.OUTCOME_FAIL), + certification_status_filter=("foo", "bar"), + ), + { + self.job_A.id: self.session.job_state_map[self.job_A.id], + self.job_Y.id: self.session.job_state_map[self.job_Y.id], + }, + ) class SessionMetadataTests(TestCase): - def test_smoke(self): metadata = SessionMetaData() self.assertEqual(metadata.title, None) @@ -817,7 +936,8 @@ def test_smoke(self): def test_initializer(self): metadata = SessionMetaData( - title="title", flags=['f1', 'f2'], running_job_name='id') + title="title", flags=["f1", "f2"], running_job_name="id" + ) self.assertEqual(metadata.title, "title") self.assertEqual(metadata.flags, set(["f1", "f2"])) self.assertEqual(metadata.running_job_name, "id") @@ -833,18 +953,18 @@ def test_accessors(self): def 
test_app_blob_default_value(self): metadata = SessionMetaData() - self.assertIs(metadata.app_blob, b'') + self.assertIs(metadata.app_blob, b"") def test_app_blob_assignment(self): metadata = SessionMetaData() - metadata.app_blob = b'blob' - self.assertEqual(metadata.app_blob, b'blob') + metadata.app_blob = b"blob" + self.assertEqual(metadata.app_blob, b"blob") metadata.app_blob = None self.assertEqual(metadata.app_blob, None) def test_app_blob_kwarg_to_init(self): - metadata = SessionMetaData(app_blob=b'blob') - self.assertEqual(metadata.app_blob, b'blob') + metadata = SessionMetaData(app_blob=b"blob") + self.assertEqual(metadata.app_blob, b"blob") def test_app_id_default_value(self): metadata = SessionMetaData() @@ -852,31 +972,35 @@ def test_app_id_default_value(self): def test_app_id_assignment(self): metadata = SessionMetaData() - metadata.app_id = 'com.canonical.certification.plainbox' + metadata.app_id = "com.canonical.certification.plainbox" self.assertEqual( - metadata.app_id, 'com.canonical.certification.plainbox') + metadata.app_id, + "com.canonical.certification.plainbox", + ) metadata.app_id = None self.assertEqual(metadata.app_id, None) def test_app_id_kwarg_to_init(self): metadata = SessionMetaData( - app_id='com.canonical.certification.plainbox') + app_id="com.canonical.certification.plainbox", + ) self.assertEqual( - metadata.app_id, 'com.canonical.certification.plainbox') + metadata.app_id, + "com.canonical.certification.plainbox", + ) class SessionDeviceContextTests(SignalTestCase): - def setUp(self): self.ctx = SessionDeviceContext() - self.provider = mock.Mock(name='provider', spec_set=Provider1) - self.unit = mock.Mock(name='unit', spec_set=UnitWithId) + self.provider = mock.Mock(name="provider", spec_set=Provider1) + self.unit = mock.Mock(name="unit", spec_set=UnitWithId) self.unit.provider = self.provider self.provider.unit_list = [self.unit] self.provider.problem_list = [] - self.job = mock.Mock(name='job', spec_set=JobDefinition, 
siblings=None) + self.job = mock.Mock(name="job", spec_set=JobDefinition, siblings=None) self.job.get_flag_set = mock.Mock(return_value=()) - self.job.Meta.name = 'job' + self.job.Meta.name = "job" def test_smoke(self): """ @@ -1036,7 +1160,9 @@ def test_on_unit_removed__via_ctx(self): self.ctx.remove_unit(self.unit) sig1 = self.assertSignalFired(self.ctx.on_unit_removed, self.unit) sig2 = self.assertSignalFired( - self.ctx.state.on_unit_removed, self.unit) + self.ctx.state.on_unit_removed, + self.unit, + ) self.assertSignalOrdering(sig1, sig2) self.assertSignalNotFired(self.ctx.state.on_job_removed, self.unit) @@ -1053,7 +1179,9 @@ def test_on_unit_removed__via_state(self): self.ctx.state.remove_unit(self.unit) sig1 = self.assertSignalFired(self.ctx.on_unit_removed, self.unit) sig2 = self.assertSignalFired( - self.ctx.state.on_unit_removed, self.unit) + self.ctx.state.on_unit_removed, + self.unit, + ) self.assertSignalOrdering(sig1, sig2) self.assertSignalNotFired(self.ctx.state.on_job_removed, self.unit) diff --git a/checkbox-ng/plainbox/impl/unit/packaging.py b/checkbox-ng/plainbox/impl/unit/packaging_metadata.py similarity index 100% rename from checkbox-ng/plainbox/impl/unit/packaging.py rename to checkbox-ng/plainbox/impl/unit/packaging_metadata.py diff --git a/checkbox-ng/plainbox/impl/unit/test_packaging.py b/checkbox-ng/plainbox/impl/unit/test_packaging_metadata.py similarity index 98% rename from checkbox-ng/plainbox/impl/unit/test_packaging.py rename to checkbox-ng/plainbox/impl/unit/test_packaging_metadata.py index 98e17065e4..77fe61c4ab 100644 --- a/checkbox-ng/plainbox/impl/unit/test_packaging.py +++ b/checkbox-ng/plainbox/impl/unit/test_packaging_metadata.py @@ -21,9 +21,9 @@ from unittest import TestCase import textwrap -from plainbox.impl.unit.packaging import DebianPackagingDriver -from plainbox.impl.unit.packaging import PackagingDriverBase -from plainbox.impl.unit.packaging import PackagingMetaDataUnit +from plainbox.impl.unit.packaging_metadata 
import DebianPackagingDriver +from plainbox.impl.unit.packaging_metadata import PackagingDriverBase +from plainbox.impl.unit.packaging_metadata import PackagingMetaDataUnit from plainbox.impl.unit.test_unit import UnitFieldValidationTests from plainbox.impl.unit.validators import UnitValidationContext from plainbox.impl.validation import Problem diff --git a/checkbox-ng/plainbox/provider_manager.py b/checkbox-ng/plainbox/provider_manager.py index d3a9f48f21..a4c5860169 100644 --- a/checkbox-ng/plainbox/provider_manager.py +++ b/checkbox-ng/plainbox/provider_manager.py @@ -64,8 +64,8 @@ from plainbox.impl.secure.providers.v1 import Provider1 from plainbox.impl.secure.providers.v1 import Provider1Definition from plainbox.impl.secure.rfc822 import RFC822SyntaxError -from plainbox.impl.unit.packaging import PackagingDriverError -from plainbox.impl.unit.packaging import get_packaging_driver +from plainbox.impl.unit.packaging_metadata import PackagingDriverError +from plainbox.impl.unit.packaging_metadata import get_packaging_driver from plainbox.impl.unit.unit_with_id import UnitWithId from plainbox.impl.unit.validators import UnitValidationContext from plainbox.impl.validation import Issue diff --git a/checkbox-ng/pyproject.toml b/checkbox-ng/pyproject.toml index cd5dcaadd2..e9548c7484 100644 --- a/checkbox-ng/pyproject.toml +++ b/checkbox-ng/pyproject.toml @@ -66,7 +66,7 @@ category = "plainbox.impl.unit.category:CategoryUnit" 'test plan' = "plainbox.impl.unit.testplan:TestPlanUnit" 'manifest entry' = "plainbox.impl.unit.manifest:ManifestEntryUnit" - 'packaging meta-data' = "plainbox.impl.unit.packaging:PackagingMetaDataUnit" + 'packaging meta-data' = "plainbox.impl.unit.packaging_metadata:PackagingMetaDataUnit" exporter = "plainbox.impl.unit.exporter:ExporterUnit" [project.entry-points."plainbox.parsers"] pxu = "plainbox.impl.secure.rfc822:load_rfc822_records" diff --git a/checkbox-ng/setup.cfg b/checkbox-ng/setup.cfg index b54f412d54..8a6a2abacd 100644 --- 
a/checkbox-ng/setup.cfg +++ b/checkbox-ng/setup.cfg @@ -35,7 +35,7 @@ plainbox.unit= category=plainbox.impl.unit.category:CategoryUnit test plan=plainbox.impl.unit.testplan:TestPlanUnit manifest entry=plainbox.impl.unit.manifest:ManifestEntryUnit - packaging meta-data=plainbox.impl.unit.packaging:PackagingMetaDataUnit + packaging meta-data=plainbox.impl.unit.packaging_metadata:PackagingMetaDataUnit exporter=plainbox.impl.unit.exporter:ExporterUnit plainbox.parsers= pxu=plainbox.impl.secure.rfc822:load_rfc822_records From 6ccc8cd0aa53f2f0ffe6cc3462f7c20a254c5597 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Mon, 22 Jan 2024 19:07:22 +0100 Subject: [PATCH 008/108] Update contruct to v2.10.57 (bugfix) (#957) * Updated to 2.10.57 - Modify the module get mechanism to be backward compatible with python3.4 - Modify the size calculator to be compatible with python3.4 ('e' not supported) * Updated to 2.10.57 Minor: whitespaces * Updated to 2.10.57 --- .../checkbox_support/vendor/construct/core.py | 38 ++++++++++++------- .../vendor/construct/debug.py | 10 ++--- .../vendor/construct/lib/binary.py | 4 +- .../vendor/construct/version.py | 6 +-- 4 files changed, 35 insertions(+), 23 deletions(-) diff --git a/checkbox-support/checkbox_support/vendor/construct/core.py b/checkbox-support/checkbox_support/vendor/construct/core.py index 26b8442fda..1bdd343288 100644 --- a/checkbox-support/checkbox_support/vendor/construct/core.py +++ b/checkbox-support/checkbox_support/vendor/construct/core.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -import struct, io, binascii, itertools, collections, pickle, sys, os, tempfile, hashlib, importlib, imp +import struct, io, binascii, itertools, collections, pickle, sys, os, tempfile, hashlib, importlib from checkbox_support.vendor.construct.lib import * from checkbox_support.vendor.construct.expr import * @@ -305,7 +305,7 @@ def parse_file(self, filename, **contextkw): r""" Parse a closed binary file. See parse(). 
""" - with io.open(filename, 'rb') as f: + with open(filename, 'rb') as f: return self.parse_stream(f, **contextkw) def _parsereport(self, stream, context, path): @@ -351,7 +351,7 @@ def build_file(self, obj, filename, **contextkw): r""" Build an object into a closed binary file. See build(). """ - with io.open(filename, 'wb') as f: + with open(filename, 'wb') as f: self.build_stream(obj, f, **contextkw) def _build(self, obj, stream, context, path): @@ -439,7 +439,13 @@ def parseall(io, this): f.write(source) modulename = hexlify(hashlib.sha1(source.encode()).digest()).decode() - module = imp.new_module(modulename) + try: + from importlib.util import module_from_spec + module_spec = importlib.machinery.ModuleSpec(modulename, None) + module = module_from_spec(module_spec) + except ImportError: + # compatibility with python3.4 + module = importlib.types.ModuleType(modulename) c = compile(source, '', 'exec') exec(c, module.__dict__) @@ -801,6 +807,8 @@ class Bytes(Construct): Parses into a bytes (of given length). Builds into the stream directly (but checks that given object matches specified length). Can also build from an integer for convenience (although BytesInteger should be used instead). Size is the specified length. + Can also build from a bytearray. + :param length: integer or context lambda :raises StreamError: requested reading negative amount, could not read enough bytes, requested writing different amount than actual data, or could not write all bytes @@ -841,6 +849,7 @@ def _parse(self, stream, context, path): def _build(self, obj, stream, context, path): length = self.length(context) if callable(self.length) else self.length data = integer2bytes(obj, length) if isinstance(obj, int) else obj + data = bytes(data) if type(data) is bytearray else data stream_write(stream, data, length, path) return data @@ -864,6 +873,8 @@ class GreedyBytes(Construct): Parses the stream to the end. Builds into the stream directly (without checks). Size is undefined. 
+ Can also build from a bytearray. + :raises StreamError: stream failed when reading until EOF :raises StringError: building from non-bytes value, perhaps unicode @@ -879,8 +890,9 @@ def _parse(self, stream, context, path): return stream_read_entire(stream, path) def _build(self, obj, stream, context, path): - stream_write(stream, obj, len(obj), path) - return obj + data = bytes(obj) if type(obj) is bytearray else obj + stream_write(stream, data, len(data), path) + return data def _emitparse(self, code): return "io.read()" @@ -981,7 +993,7 @@ class FormatField(Construct): Parses into an integer. Builds from an integer into specified byte count and endianness. Size is determined by `struct` module according to specified format string. :param endianity: string, character like: < > = - :param format: string, character like: f d B H L Q b h l q + :param format: string, character like: f d B H L Q b h l q e :raises StreamError: requested reading negative amount, could not read enough bytes, requested writing different amount than actual data, or could not write all bytes :raises FormatFieldError: wrong format string, or struct.(un)pack complained about the value @@ -1551,7 +1563,7 @@ def PascalString(lengthfield, encoding): macro = StringEncoded(Prefixed(lengthfield, GreedyBytes), encoding) def _emitseq(ksy, bitwise): return [ - dict(id="lengthfield", type=lengthfield._compileprimitivetype(ksy, bitwise)), + dict(id="lengthfield", type=lengthfield._compileprimitivetype(ksy, bitwise)), dict(id="data", size="lengthfield", type="str", encoding=encoding), ] macro._emitseq = _emitseq @@ -1900,7 +1912,7 @@ class Struct(Construct): r""" Sequence of usually named constructs, similar to structs in C. The members are parsed and build in the order they are defined. If a member is anonymous (its name is None) then it gets parsed and the value discarded, or it gets build from nothing (from None). - Some fields do not need to be named, since they are built without value anyway. 
See: Const Padding Check Error Pass Terminated Seek Tell for examples of such fields. + Some fields do not need to be named, since they are built without value anyway. See: Const Padding Check Error Pass Terminated Seek Tell for examples of such fields. Operator + can also be used to make Structs (although not recommended). @@ -2954,7 +2966,7 @@ class Numpy(Construct): Parses using `numpy.load() `_ and builds using `numpy.save() `_ functions, using Numpy binary protocol. Size is undefined. :raises ImportError: numpy could not be imported during parsing or building - :raises StreamError: requested reading negative amount, could not read enough bytes, requested writing different amount than actual data, or could not write all bytes + :raises ValueError: could not read enough bytes, or so Can propagate numpy.load() and numpy.save() exceptions. @@ -3262,7 +3274,7 @@ class Union(Construct): Example:: - >>> d = Union(0, + >>> d = Union(0, ... "raw" / Bytes(8), ... "ints" / Int32ub[2], ... "shorts" / Int16ub[4], @@ -4400,7 +4412,7 @@ def _emitparse(self, code): def _emitseq(self, ksy, bitwise): return [ - dict(id="lengthfield", type=self.lengthfield._compileprimitivetype(ksy, bitwise)), + dict(id="lengthfield", type=self.lengthfield._compileprimitivetype(ksy, bitwise)), dict(id="data", size="lengthfield", type=self.subcon._compileprimitivetype(ksy, bitwise)), ] @@ -4442,7 +4454,7 @@ def _actualsize(self, stream, context, path): macro._actualsize = _actualsize def _emitseq(ksy, bitwise): return [ - dict(id="countfield", type=countfield._compileprimitivetype(ksy, bitwise)), + dict(id="countfield", type=countfield._compileprimitivetype(ksy, bitwise)), dict(id="data", type=subcon._compileprimitivetype(ksy, bitwise), repeat="expr", repeat_expr="countfield"), ] macro._emitseq = _emitseq diff --git a/checkbox-support/checkbox_support/vendor/construct/debug.py b/checkbox-support/checkbox_support/vendor/construct/debug.py index fae361ea6e..9704665a7a 100644 --- 
a/checkbox-support/checkbox_support/vendor/construct/debug.py +++ b/checkbox-support/checkbox_support/vendor/construct/debug.py @@ -22,9 +22,9 @@ class Probe(Construct): -------------------------------------------------- Probe, path is (parsing), into is None Stream peek: (hexlified) b'010203'... - Container: + Container: count = 5 - items = ListContainer: + items = ListContainer: 97 98 99 @@ -95,11 +95,11 @@ def printout(self, stream, context, path): class Debugger(Subconstruct): r""" PDB-based debugger. When an exception occurs in the subcon, a debugger will appear and allow you to debug the error (and even fix it on-the-fly). - + :param subcon: Construct instance, subcon to debug - + Example:: - + >>> Debugger(Byte[3]).build([]) -------------------------------------------------- diff --git a/checkbox-support/checkbox_support/vendor/construct/lib/binary.py b/checkbox-support/checkbox_support/vendor/construct/lib/binary.py index 799a6300e0..08d3c9bc94 100644 --- a/checkbox-support/checkbox_support/vendor/construct/lib/binary.py +++ b/checkbox-support/checkbox_support/vendor/construct/lib/binary.py @@ -90,7 +90,7 @@ def bytes2integer(data, signed=False): BYTES2BITS_CACHE = {i:integer2bits(i,8) for i in range(256)} def bytes2bits(data): - r""" + r""" Converts between bit and byte representations in b-strings. Example: @@ -103,7 +103,7 @@ def bytes2bits(data): BITS2BYTES_CACHE = {bytes2bits(int2byte(i)):int2byte(i) for i in range(256)} def bits2bytes(data): - r""" + r""" Converts between bit and byte representations in b-strings. 
Example: diff --git a/checkbox-support/checkbox_support/vendor/construct/version.py b/checkbox-support/checkbox_support/vendor/construct/version.py index df71096ddb..513a82f3af 100644 --- a/checkbox-support/checkbox_support/vendor/construct/version.py +++ b/checkbox-support/checkbox_support/vendor/construct/version.py @@ -1,3 +1,3 @@ -version = (2,10,53) -version_string = "2.10.53" -release_date = "2020.01.19" +version = (2,10,57) +version_string = "2.10.57" +release_date = "2021.01.26" From 958ec61801dbfd04d229da829518236ae3023d69 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Tue, 23 Jan 2024 11:03:36 +0100 Subject: [PATCH 009/108] Changed variable name to environment from environ (bugfix) (#961) Changed variable name to environment from environ --- .../checkbox_support/snap_utils/tests/test_config.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/checkbox-support/checkbox_support/snap_utils/tests/test_config.py b/checkbox-support/checkbox_support/snap_utils/tests/test_config.py index 9f4ff9b9b5..acccffa472 100644 --- a/checkbox-support/checkbox_support/snap_utils/tests/test_config.py +++ b/checkbox-support/checkbox_support/snap_utils/tests/test_config.py @@ -85,17 +85,17 @@ def test_smoke(self): m = mock_open() with patch('builtins.open', m): write_checkbox_conf({'foo': 'bar'}) - m().write.assert_called_with('[environ]\n') - m().write.assert_called_with('FOO = bar\n') - m().write.assert_called_with('\n') + m().write.assert_any_call('[environment]\n') + m().write.assert_any_call('FOO = bar\n') + m().write.assert_any_call('\n') self.assertEqual(m().write.call_count, 3) def test_writes_empty(self): m = mock_open() with patch('builtins.open', m): write_checkbox_conf({}) - m().write.assert_called_with('[environ]\n') - m().write.assert_called_with('\n') + m().write.assert_any_call('[environment]\n') + m().write.assert_any_call('\n') self.assertEqual(m().write.call_count, 2) From da6baa34d648d36278d94eaef49beb86010ba53e Mon Sep 17 
00:00:00 2001 From: Massimiliano Date: Tue, 23 Jan 2024 18:00:10 +0100 Subject: [PATCH 010/108] Mock module and explain why it was done (Bugfix) (#962) Mock module and explain why it was done --- checkbox-ng/plainbox/impl/secure/origin.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/checkbox-ng/plainbox/impl/secure/origin.py b/checkbox-ng/plainbox/impl/secure/origin.py index e6a64d7fe4..5000cf6926 100644 --- a/checkbox-ng/plainbox/impl/secure/origin.py +++ b/checkbox-ng/plainbox/impl/secure/origin.py @@ -25,6 +25,7 @@ import functools import inspect import os +import sys from plainbox.abc import ITextSource from plainbox.i18n import gettext as _ @@ -185,6 +186,19 @@ def get_caller_origin(cls, back=0): """ # Create an Origin instance that pinpoints the place that called # get_caller_origin(). + + # This avoids a sideffect of using inspect.stack(0) when the urwid + # module is in said stack. The python3-urwid + # package is currently partially broken on Ubuntu Noble. inspect.stack + # activates a sideffect of the urwid.__init__ module that imports + # urwid.display.web. That can not be done because the web module is + # missing the _web.js and _web.css. We don't need this part of urwid but + # given the chain described it is improted anyway. The following line + # will only make the import go through. 
+ # See: https://packages.ubuntu.com/noble/amd64/python3-urwid/filelist + # FIXME: remove this line once the files are back + sys.modules["urwid.display.web"] = {} + caller_frame, filename, lineno = inspect.stack(0)[2 + back][:3] try: source = PythonFileTextSource(filename) From 5a26768f1724f60a0d7c0bad325b39d7fd25f7c7 Mon Sep 17 00:00:00 2001 From: Fernando Bravo <39527354+fernando79513@users.noreply.github.com> Date: Tue, 23 Jan 2024 19:19:20 +0100 Subject: [PATCH 011/108] Adding metabox tests to packaging (Infra) (#960) * Added new metabox test for packaging versions This test makes sure that the current packaging driver is applicable for a specific os version. * Added assert in file * Fixed run_manage and assert_in_file actions * Cleaned previous implementation of the tests * Apply suggestions from code review Co-authored-by: Massimiliano * Applied the rest of the PR comments * Added a Xenial scenario --------- Co-authored-by: Massimiliano --- .../plainbox/impl/session/test_state.py | 2 +- metabox/metabox/core/actions.py | 10 +- metabox/metabox/core/scenario.py | 57 ++++++++-- .../metabox/metabox-provider/debian/control | 6 + .../metabox-provider/units/basic-jobs.pxu | 2 +- .../metabox/scenarios/packaging/__init__.py | 0 .../scenarios/packaging/deb_packaging.py | 105 ++++++++++++++++++ .../metabox/scenarios/packaging/packaging.pxu | 29 +++++ 8 files changed, 198 insertions(+), 13 deletions(-) create mode 100644 metabox/metabox/metabox-provider/debian/control create mode 100644 metabox/metabox/scenarios/packaging/__init__.py create mode 100644 metabox/metabox/scenarios/packaging/deb_packaging.py create mode 100644 metabox/metabox/scenarios/packaging/packaging.pxu diff --git a/checkbox-ng/plainbox/impl/session/test_state.py b/checkbox-ng/plainbox/impl/session/test_state.py index 839e294cd9..2adafd9763 100644 --- a/checkbox-ng/plainbox/impl/session/test_state.py +++ b/checkbox-ng/plainbox/impl/session/test_state.py @@ -1215,4 +1215,4 @@ def 
test_on_job_removed__via_state(self): sig1 = self.assertSignalFired(self.ctx.on_unit_removed, self.job) sig2 = self.assertSignalFired(self.ctx.state.on_unit_removed, self.job) sig3 = self.assertSignalFired(self.ctx.state.on_job_removed, self.job) - self.assertSignalOrdering(sig1, sig2, sig3) + self.assertSignalOrdering(sig1, sig2, sig3) \ No newline at end of file diff --git a/metabox/metabox/core/actions.py b/metabox/metabox/core/actions.py index 44c6351c31..e1032b2c3f 100644 --- a/metabox/metabox/core/actions.py +++ b/metabox/metabox/core/actions.py @@ -25,7 +25,7 @@ "Start", "Expect", "Send", "SelectTestPlan", "AssertPrinted", "AssertNotPrinted", "AssertRetCode", "AssertAgentActive", "Sleep", "RunCmd", "Signal", "Reboot", - "NetUp", "NetDown", "Put", "MkTree" + "NetUp", "NetDown", "Put", "MkTree", "RunManage", "AssertInFile" ] @@ -103,3 +103,11 @@ class Put(ActionBase): class MkTree(ActionBase): handler = "mktree" + + +class RunManage(ActionBase): + handler = "run_manage" + + +class AssertInFile(ActionBase): + handler = "assert_in_file" diff --git a/metabox/metabox/core/scenario.py b/metabox/metabox/core/scenario.py index ae48c7a88d..992b3cb67e 100644 --- a/metabox/metabox/core/scenario.py +++ b/metabox/metabox/core/scenario.py @@ -22,6 +22,7 @@ See Scenario class properties and the assert_* functions, as they serve as the interface to a Scenario. 
""" +from pathlib import Path import re import time import shlex @@ -58,7 +59,7 @@ def __init__( mode, *releases, controller_revision="current", - agent_revision="current" + agent_revision="current", ): self.mode = mode self.releases = releases @@ -68,11 +69,12 @@ def __init__( self.controller_revision = controller_revision self.agent_machine = None self.agent_revision = agent_revision + self.start_session = True self._checks = [] self._ret_code = None self._stdout = "" self._stderr = "" - self._oudstr_full = "" + self._outstr_full = "" self._pts = None def get_output_streams(self): @@ -86,7 +88,12 @@ def has_passed(self): def run(self): # Simple scenarios don't need to specify a START step - if not any([isinstance(s, Start) for s in self.steps]): + # If there's no START step, add one unless the scenario + # explicitly says not to start a session + if ( + not any(isinstance(s, Start) for s in self.steps) + and self.start_session + ): self.steps.insert(0, Start()) for i, step in enumerate(self.steps): # Check how to start checkbox, interactively or not @@ -94,7 +101,7 @@ def run(self): interactive = False # CHECK if any EXPECT/SEND command follows # w/o a new call to START before it - for next_step in self.steps[i + 1 :]: + for next_step in self.steps[i + 1:]: if isinstance(next_step, Start): break if isinstance(next_step, (Expect, Send, SelectTestPlan)): @@ -160,8 +167,8 @@ def assertEqual(self, first, second): def assertNotEqual(self, first, second): self._checks.append(first != second) - def start(self, cmd='', interactive=False, timeout=0): - if self.mode == 'remote': + def start(self, cmd="", interactive=False, timeout=0): + if self.mode == "remote": outcome = self.start_all(interactive=interactive, timeout=timeout) if interactive: self._pts = outcome @@ -230,14 +237,22 @@ def select_test_plan(self, testplan_id, timeout=60): def run_cmd(self, cmd, env={}, interactive=False, timeout=0, target="all"): if self.mode == "remote": if target == "controller": - 
self.controller_machine.run_cmd(cmd, env, interactive, timeout) + result = self.controller_machine.run_cmd( + cmd, env, interactive, timeout + ) elif target == "agent": - self.agent_machine.run_cmd(cmd, env, interactive, timeout) + result = self.agent_machine.run_cmd( + cmd, env, interactive, timeout + ) else: self.controller_machine.run_cmd(cmd, env, interactive, timeout) - self.agent_machine.run_cmd(cmd, env, interactive, timeout) + result = self.agent_machine.run_cmd( + cmd, env, interactive, timeout + ) else: - self.local_machine.run_cmd(cmd, env, interactive, timeout) + result = self.local_machine.run_cmd(cmd, env, interactive, timeout) + + return result def reboot(self, timeout=0, target="all"): if self.mode == "remote": @@ -305,3 +320,25 @@ def mktree(self, path, privileged=False, timeout=0, target="all"): cmd = ["sudo"] + cmd cmd_str = shlex.join(cmd) self.run_cmd(cmd_str, target=target, timeout=timeout) + + def run_manage(self, args, timeout=0, target="all"): + """ + Runs the manage.py script with some arguments + """ + path = "/home/ubuntu/checkbox/metabox/metabox/metabox-provider" + cmd = f"bash -c 'cd {path} ; python3 manage.py {args}'" + self.run_cmd(cmd, target=target, timeout=timeout) + + def assert_in_file(self, pattern, path): + """ + Check if a file created during Checkbox execution contains text that + matches the pattern. + :param patter: regular expresion to check against the lines. 
+ :param path: path to the file + """ + if isinstance(path, Path): + path = str(path) + + result = self.run_cmd(f"cat {path}") + regex = re.compile(pattern) + self._checks.append(bool(regex.search(result.stdout))) diff --git a/metabox/metabox/metabox-provider/debian/control b/metabox/metabox/metabox-provider/debian/control new file mode 100644 index 0000000000..5d44d2c91f --- /dev/null +++ b/metabox/metabox/metabox-provider/debian/control @@ -0,0 +1,6 @@ +Package: metabox-provider +Depends: ${plainbox:Depends} +Recommends: ${plainbox:Recommends} +Suggests: ${plainbox:Suggests} +X-Plainbox-Provider: yes +Description: Metabox provider for testing purposes diff --git a/metabox/metabox/metabox-provider/units/basic-jobs.pxu b/metabox/metabox/metabox-provider/units/basic-jobs.pxu index c5354c1568..92d7431f18 100644 --- a/metabox/metabox/metabox-provider/units/basic-jobs.pxu +++ b/metabox/metabox/metabox-provider/units/basic-jobs.pxu @@ -246,4 +246,4 @@ plugin: shell id: dependency_installation command: dependency_installation.py _summary: Verify that all required modules are installed -_description: Verify that all required modules are installed +_description: Verify that all required modules are installed \ No newline at end of file diff --git a/metabox/metabox/scenarios/packaging/__init__.py b/metabox/metabox/scenarios/packaging/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/metabox/metabox/scenarios/packaging/deb_packaging.py b/metabox/metabox/scenarios/packaging/deb_packaging.py new file mode 100644 index 0000000000..fc83ea8b1a --- /dev/null +++ b/metabox/metabox/scenarios/packaging/deb_packaging.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python3 +# Copyright (C) 2023 Canonical Ltd. +# +# Authors: +# Fernando Bravo +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, +# as published by the Free Software Foundation. 
+# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from pathlib import Path + +from metabox.core.scenario import Scenario +from metabox.core.actions import ( + Put, + RunManage, + AssertInFile, +) +from metabox.core.utils import tag + +provider_path = Path("/home/ubuntu/checkbox/metabox/metabox/metabox-provider") +packaging_pxu_path = provider_path / "units/packaging.pxu" +substvar_path = provider_path / "debian/metabox-provider.substvars" + +pxu_path = Path(__file__).parent / "packaging.pxu" +with pxu_path.open("r") as file: + packaging_pxu = file.read() + + +@tag("packaging") +class DebPackagingJammy(Scenario): + """ + Verifies that the deb-packaging test pass on jammy (22.04). + """ + + modes = ["local"] + # Run the scenario without starting a session. + start_session = False + config_override = {"local": {"releases": ["jammy"]}} + steps = [ + Put(packaging_pxu_path, packaging_pxu), + RunManage(args="packaging"), + AssertInFile("dep-pack-gt-20", substvar_path), + AssertInFile("rec-pack-gt-20", substvar_path), + AssertInFile("sug-pack-gt-20", substvar_path), + ] + + +@tag("packaging") +class DebPackagingFocal(Scenario): + """ + Verifies that the deb-packaging test pass on focal (20.04). + """ + + modes = ["local"] + start_session = False + config_override = {"local": {"releases": ["focal"]}} + steps = [ + Put(packaging_pxu_path, packaging_pxu), + RunManage(args="packaging"), + AssertInFile("dep-pack-le-20", substvar_path), + AssertInFile("rec-pack-le-20", substvar_path), + AssertInFile("sug-pack-le-20", substvar_path), + ] + + +@tag("packaging") +class DebPackagingBionic(Scenario): + """ + Verifies that the deb-packaging test pass on bionic (18.04). 
+ """ + + modes = ["local"] + config_override = {"local": {"releases": ["bionic"]}} + steps = [ + Put(packaging_pxu_path, packaging_pxu), + RunManage(args="packaging"), + AssertInFile("dep-pack-le-20", substvar_path), + AssertInFile("rec-pack-le-20", substvar_path), + AssertInFile("sug-pack-le-20", substvar_path), + ] + + +@tag("packaging") +class DebPackagingXenial(Scenario): + """ + Verifies that the deb-packaging test pass on xenial (16.04). + """ + + modes = ["local"] + config_override = {"local": {"releases": ["xenial"]}} + steps = [ + Put(packaging_pxu_path, packaging_pxu), + RunManage(args="packaging"), + AssertInFile("dep-pack-le-20", substvar_path), + AssertInFile("rec-pack-le-20", substvar_path), + AssertInFile("sug-pack-le-20", substvar_path), + ] diff --git a/metabox/metabox/scenarios/packaging/packaging.pxu b/metabox/metabox/scenarios/packaging/packaging.pxu new file mode 100644 index 0000000000..24543f14d9 --- /dev/null +++ b/metabox/metabox/scenarios/packaging/packaging.pxu @@ -0,0 +1,29 @@ +unit: packaging meta-data +os-id: ubuntu +os-version-id: <= 20.04 +Depends: dep-pack-le-20 + +unit: packaging meta-data +os-id: ubuntu +os-version-id: > 20.04 +Depends: dep-pack-gt-20 + +unit: packaging meta-data +os-id: ubuntu +os-version-id: <= 20.04 +Recommends: rec-pack-le-20 + +unit: packaging meta-data +os-id: ubuntu +os-version-id: > 20.04 +Recommends: rec-pack-gt-20 + +unit: packaging meta-data +os-id: ubuntu +os-version-id: <= 20.04 +Suggests: sug-pack-le-20 + +unit: packaging meta-data +os-id: ubuntu +os-version-id: > 20.04 +Suggests: sug-pack-gt-20 \ No newline at end of file From 37e6276187d32d11bda4ef5557e48dc19e9c9a3d Mon Sep 17 00:00:00 2001 From: Pierre Equoy Date: Wed, 24 Jan 2024 16:32:29 +0800 Subject: [PATCH 012/108] Add oem-qa as CODEOWNERS for the contrib ce-oem (Infra) (#937) * Add oem-qa as CODEOWNERS for the ce-oem Checkbox provider in contrib * Add CODEOWNERS rights for oem-qa team to modify the ce-oem GitHub workflow --- 
.github/CODEOWNERS | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000..2aef91fb5c --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,3 @@ +# Devices QA team +contrib/checkbox-provider-ce-oem @canonical/oem-qa +.github/workflows/tox-contrib-provider-ce-oem.yaml @canonical/oem-qa From 727e0049c1eda6949c9075ae2590c5de98f961bd Mon Sep 17 00:00:00 2001 From: Pierre Equoy Date: Wed, 24 Jan 2024 16:34:15 +0800 Subject: [PATCH 013/108] Add contrib provider ce-oem GitHub workflows (Infra) (#938) * Remove old ce-oem workflow This workflow comes from the original Git repository and is no longer needed. * Add workflow for the ce-oem provider This workflow is adapted from existing workflows used for the generic providers. * Update contrib ce-oem tox config - Point to the correct version of Python using {envpython} - Remove the "develop installation" step for the ce-oem provider - Convert the test step into a test coverage one to generate an XML report to be used later by Codecov * Point to the right directory for generic providers * Install ce-oem provider prior to run validation on it * Add codecov config for ce-oem contrib provider Following Codecov documentation on flags[1], use the `contrib-provider-ce-oem` flag defined in the GitHub workflow[2] to mark Codecov status checks as non-blocking[3]. 
[1] https://docs.codecov.com/docs/flags [2] .github/workflows/tox-contrib-provider-ce-oem.yaml [3] https://docs.codecov.com/docs/common-recipe-list#set-non-blocking-status-checks * Fix tox configuration for ce-oem provider * Add coverage requirement to ce-oem tox config * Add coverage config to ce-oem provider * Update codecov config to make project status non-blocking for ce-oem provider * Change job id and add a name to it for easier reference --- .../tox-contrib-provider-ce-oem.yaml | 47 +++++++++++++++++++ codecov.yml | 11 +++++ contrib/.github/workflows/tox.yaml | 35 -------------- contrib/checkbox-provider-ce-oem/.coveragerc | 17 +++++++ contrib/checkbox-provider-ce-oem/tox.ini | 32 ++++++++----- 5 files changed, 94 insertions(+), 48 deletions(-) create mode 100644 .github/workflows/tox-contrib-provider-ce-oem.yaml delete mode 100644 contrib/.github/workflows/tox.yaml create mode 100644 contrib/checkbox-provider-ce-oem/.coveragerc diff --git a/.github/workflows/tox-contrib-provider-ce-oem.yaml b/.github/workflows/tox-contrib-provider-ce-oem.yaml new file mode 100644 index 0000000000..673bd4540f --- /dev/null +++ b/.github/workflows/tox-contrib-provider-ce-oem.yaml @@ -0,0 +1,47 @@ +name: Test provider-ce-oem (from contrib area) with tox + +on: + push: + branches: [ main ] + paths: + - contrib/checkbox-provider-ce-oem/** + pull_request: + branches: [ main ] + paths: + - contrib/checkbox-provider-ce-oem/** + workflow_dispatch: + +jobs: + tox_test_contrib_ce_oem_provider: + name: Test ce-oem provider (from contrib area) with tox + defaults: + run: + working-directory: contrib/checkbox-provider-ce-oem + runs-on: ubuntu-20.04 + strategy: + matrix: + python: ["3.5", "3.6", "3.8", "3.10"] + include: + - python: "3.5" + tox_env_name: "py35" + - python: "3.6" + tox_env_name: "py36" + - python: "3.8" + tox_env_name: "py38" + - python: "3.10" + tox_env_name: "py310" + steps: + - uses: actions/checkout@v3 + - name: Setup Python + uses: actions/setup-python@v4 + with: + 
python-version: ${{ matrix.python }} + - name: Install tox + run: pip install tox + - name: Run tox + run: tox -e${{ matrix.tox_env_name }} + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v3 + with: + token: ${{ secrets.CODECOV_TOKEN }} + flags: contrib-provider-ce-oem diff --git a/codecov.yml b/codecov.yml index 2181d8cc80..01a2911c76 100644 --- a/codecov.yml +++ b/codecov.yml @@ -8,3 +8,14 @@ flag_management: # carryforward means if a test was not run again, use the previous # coverage result for the current flag (part) carryforward: true + individual_flags: + # non-blocking status check for ce-oem provider in contrib area + - name: contrib-provider-ce-oem + paths: + - contrib/checkbox-provider-ce-oem + carryforward: true + statuses: + - type: patch + informational: true + - type: project + informational: true diff --git a/contrib/.github/workflows/tox.yaml b/contrib/.github/workflows/tox.yaml deleted file mode 100644 index d453966604..0000000000 --- a/contrib/.github/workflows/tox.yaml +++ /dev/null @@ -1,35 +0,0 @@ -name: Test checkbox-provider-ce-oem with tox - -on: - push: - pull_request: - branches: [ main ] - paths: - - checkbox-provider-ce-oem/** - workflow_dispatch: - -jobs: - build: - runs-on: ubuntu-20.04 - strategy: - matrix: - python: ["3.5", "3.6", "3.8", "3.10"] - steps: - - uses: actions/checkout@v3 - - name: install stable checkbox and checkbox-provider-ce-oem - run: | - cd ~ - git clone --depth=1 https://github.com/canonical/checkbox.git - cd - - cp -r checkbox-provider-ce-oem ~/checkbox/providers - - name: Setup Python - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python }} - - name: Install tox and otehr dependencies - run: | - python3 -m pip install --upgrade pip - pip3 install tox - - name: Run tox - working-directory: /home/runner/checkbox/providers/checkbox-provider-ce-oem/ - run: tox -e py${{ matrix.python }} diff --git a/contrib/checkbox-provider-ce-oem/.coveragerc 
b/contrib/checkbox-provider-ce-oem/.coveragerc new file mode 100644 index 0000000000..8dfe971275 --- /dev/null +++ b/contrib/checkbox-provider-ce-oem/.coveragerc @@ -0,0 +1,17 @@ +[run] +branch = True +source = bin,manage +omit = + tests/* + +[report] +exclude_lines = + @abc.abstractmethod + @abc.abstractproperty + @abstractmethod + @abstractproperty + @public + pragma: no cover + raise NotImplementedError + if __name__ == .__main__.: +show_missing = True diff --git a/contrib/checkbox-provider-ce-oem/tox.ini b/contrib/checkbox-provider-ce-oem/tox.ini index 6028600aad..2090b30207 100644 --- a/contrib/checkbox-provider-ce-oem/tox.ini +++ b/contrib/checkbox-provider-ce-oem/tox.ini @@ -6,26 +6,29 @@ skipsdist=True [testenv] allowlist_externals = rm commands = - pip -q install ../../checkbox-ng + {envpython} -m pip -q install ../../checkbox-ng # Required because this provider depends on checkbox-support parsers & scripts - pip -q install ../../checkbox-support + {envpython} -m pip -q install ../../checkbox-support rm -f /var/tmp/checkbox-providers-develop/checkbox-provider-resource.provider rm -f /var/tmp/checkbox-providers-develop/checkbox-provider-base.provider rm -f /var/tmp/checkbox-providers-develop/checkbox-provider-certification-client.provider rm -f /var/tmp/checkbox-providers-develop/checkbox-provider-certification-server.provider rm -f /var/tmp/checkbox-providers-develop/checkbox-provider-ce-oem.provider # Install all providers in develop mode to make sure everything works fine - {envbindir}/python3 ../resource/manage.py develop - {envbindir}/python3 ../base/manage.py develop - {envbindir}/python3 ../certification-client/manage.py develop - {envbindir}/python3 ../certification-server/manage.py develop - {envbindir}/python3 manage.py develop - {envbindir}/python3 manage.py validate - {envbindir}/python3 manage.py test + {envpython} ../../providers/resource/manage.py develop + {envpython} ../../providers/base/manage.py develop + {envpython} 
../../providers/certification-client/manage.py develop + {envpython} ../../providers/certification-server/manage.py develop + {envpython} manage.py develop + {envpython} manage.py validate + {envpython} -m coverage run manage.py test + {envpython} -m coverage report + {envpython} -m coverage xml -[testenv:py3.5] +[testenv:py35] deps = flake8 + coverage == 5.5 pyserial natsort == 4.0.3 requests == 2.9.1 @@ -42,9 +45,10 @@ setenv= # but it breaks some old python3.5 builds SETUPTOOLS_SCM_PRETEND_VERSION=0.0 -[testenv:py3.6] +[testenv:py36] deps = flake8 + coverage == 5.5 pyserial natsort == 4.0.3 requests == 2.18.4 @@ -57,9 +61,10 @@ deps = distro == 1.0.1 PyYAML == 3.12 -[testenv:py3.8] +[testenv:py38] deps = flake8 + coverage == 7.3.0 pep8-naming pyserial natsort == 7.0.1 @@ -73,9 +78,10 @@ deps = distro == 1.4.0 PyYAML == 5.3.1 -[testenv:py3.10] +[testenv:py310] deps = flake8 + coverage == 7.3.0 pep8-naming pyserial natsort == 8.0.2 From 82ada11f3d5c24885ffc1fb63bd3f9ec8698b202 Mon Sep 17 00:00:00 2001 From: eugene-yujinwu <117058060+eugene-yujinwu@users.noreply.github.com> Date: Thu, 25 Jan 2024 21:50:27 +0800 Subject: [PATCH 014/108] Fix the wrong category issue for the camera 5986:118c (BugFix) (#964) Fix the wrong category issue for the camera 5986:118c (BugFix) (LP:#2051091) --- checkbox-support/checkbox_support/parsers/udevadm.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/checkbox-support/checkbox_support/parsers/udevadm.py b/checkbox-support/checkbox_support/parsers/udevadm.py index 70bdc2a5a6..158f965e3e 100644 --- a/checkbox-support/checkbox_support/parsers/udevadm.py +++ b/checkbox-support/checkbox_support/parsers/udevadm.py @@ -98,6 +98,7 @@ ROOT_MOUNTPOINT = re.compile( r'MOUNTPOINT=.*/(writable|hostfs|' r'ubuntu-seed|ubuntu-boot|ubuntu-save|data|boot)') +CAMERA_RE = re.compile(r"Camera", re.I) def slugify(_string): @@ -457,7 +458,16 @@ def category(self): return "CARDREADER" if (self.vendor is not None and 
GENERIC_RE.search(self.vendor) and - not FLASH_DISK_RE.search(self.product)): + self.product is not None and + not FLASH_DISK_RE.search(self.product) and + not CAMERA_RE.search(self.product)): + # The condition + # not CAMERA_RE.search(self.product) + # is a fix for the bug LP: #2051091, + # the usb camera '5986:118c Acer, Inc Integrated Camera + # of the system, + # CID 202309-32040 Lenovo - ThinkPad P16s Gen 2 AMD + # Will be given the category attribut 'CARDREADER' by udev_resource return "CARDREADER" # A rare gem, this driver reported by udev is actually an ID_MODEL: # E: DRIVER=Realtek PCIe card reader From 54ef03a206a7b80688bd4599f3bf8d907368efeb Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Wed, 31 Jan 2024 10:25:45 +0100 Subject: [PATCH 015/108] Add missing category to resource jobs (bugfix) (#973) Add missing category to resource jobs --- providers/resource/jobs/category.pxu | 3 ++ providers/resource/jobs/resource.pxu | 44 +++++++++++++++++++++++++++- 2 files changed, 46 insertions(+), 1 deletion(-) create mode 100644 providers/resource/jobs/category.pxu diff --git a/providers/resource/jobs/category.pxu b/providers/resource/jobs/category.pxu new file mode 100644 index 0000000000..f340350418 --- /dev/null +++ b/providers/resource/jobs/category.pxu @@ -0,0 +1,3 @@ +unit: category +id: information_gathering +_name: Gathers information about the DUT diff --git a/providers/resource/jobs/resource.pxu b/providers/resource/jobs/resource.pxu index 221c97c1e0..41b4ad3008 100644 --- a/providers/resource/jobs/resource.pxu +++ b/providers/resource/jobs/resource.pxu @@ -55,6 +55,7 @@ Depends: iw (>= 5.3) id: cpuinfo estimated_duration: 0.37 plugin: resource +category_id: information_gathering user: root command: cpuinfo_resource.py _summary: Collect information about the CPU @@ -63,6 +64,7 @@ _description: Gets CPU resource info from /proc/cpuinfo id: cdimage estimated_duration: 0.61 plugin: resource +category_id: information_gathering user: root command: 
cdimage_resource.py _summary: Collect information about installation media (casper) @@ -71,6 +73,7 @@ _description: Gets installation info from casper.log and media-info id: dpkg estimated_duration: 0.19 plugin: resource +category_id: information_gathering command: dpkg_resource.py _summary: Collect information about dpkg version _description: Gets info on the version of dpkg installed @@ -78,6 +81,7 @@ _description: Gets info on the version of dpkg installed id: lsb estimated_duration: 1.63 plugin: resource +category_id: information_gathering command: lsb_resource.py _description: Generates release info based on /etc/lsb-release _summary: Collect information about installed system (lsb-release) @@ -85,6 +89,7 @@ _summary: Collect information about installed system (lsb-release) id: meminfo estimated_duration: 0.1 plugin: resource +category_id: information_gathering command: meminfo_resource.py _description: Generates resource info based on /proc/meminfo _summary: Collect information about system memory (/proc/meminfo) @@ -92,6 +97,7 @@ _summary: Collect information about system memory (/proc/meminfo) id: module estimated_duration: 0.13 plugin: resource +category_id: information_gathering user: root command: module_resource.py _description: Generates resources info on running kernel modules @@ -100,6 +106,7 @@ _summary: Collect information about kernel modules id: package estimated_duration: 1.16 plugin: resource +category_id: information_gathering command: # shellcheck disable=SC2016 dpkg-query -W -f='name: ${Package}\nversion: ${Version}\n\n' || true @@ -109,6 +116,7 @@ _summary: Collect information about installed software packages id: executable estimated_duration: 0.78 plugin: resource +category_id: information_gathering _summary: Enumerate available system executables _description: Generates a resource for all available executables command: @@ -117,6 +125,7 @@ command: id: device estimated_duration: 0.48 plugin: resource +category_id: information_gathering 
command: udev_resource.py _description: Creates resource info from udev _summary: Collect information about hardware devices (udev) @@ -124,6 +133,7 @@ _summary: Collect information about hardware devices (udev) id: removable_partition estimated_duration: 0.48 plugin: resource +category_id: information_gathering command: udev_resource.py -f PARTITION || true _description: Creates removable partitions info from udev _summary: Collect removable partitions info from udev @@ -131,6 +141,7 @@ _summary: Collect removable partitions info from udev id: dmi_present estimated_duration: 0.02 plugin: resource +category_id: information_gathering user: root command: if [ -d /sys/devices/virtual/dmi ] @@ -144,6 +155,7 @@ _summary: Resource to detect if dmi data is present id: dmi estimated_duration: 0.59 plugin: resource +category_id: information_gathering requires: dmi_present.state == 'supported' user: root @@ -153,6 +165,7 @@ _summary: Collect information about hardware devices (DMI) id: efi estimated_duration: 0.56 plugin: resource +category_id: information_gathering user: root command: efi_resource.py _summary: Collect information about the EFI configuration @@ -160,6 +173,7 @@ _summary: Collect information about the EFI configuration id: uname estimated_duration: 0.09 plugin: resource +category_id: information_gathering command: uname_resource.py _description: Creates resource info from uname output _summary: Collect information about the running kernel @@ -167,6 +181,7 @@ _summary: Collect information about the running kernel id: sleep estimated_duration: 0.03 plugin: resource +category_id: information_gathering command: tr ' ' '\n' < /sys/power/state | while IFS= read -r state; do echo "$state: supported"; done if [ -e /sys/power/mem_sleep ]; then @@ -181,6 +196,7 @@ template-resource: device template-filter: device.category == 'CDROM' id: optical_drive_{name} plugin: resource +category_id: information_gathering command: optical_resource.py /dev/{name} estimated_duration: 
0.5 _summary: Create resource info for supported optical actions ({name}) @@ -188,6 +204,7 @@ _summary: Create resource info for supported optical actions ({name}) id: block_device estimated_duration: 0.08 plugin: resource +category_id: information_gathering user: root command: block_device_resource.py _summary: Create resource info for removable block devices @@ -196,6 +213,7 @@ id: usb template-engine: jinja2 estimated_duration: 0.33 plugin: resource +category_id: information_gathering _description: Creates resource info for supported USB versions _summary: Collect information about supported types of USB command: @@ -211,6 +229,7 @@ command: id: xinput estimated_duration: 0.19 plugin: resource +category_id: information_gathering command: xinput_resource.py requires: package.name == "xinput" _summary: Creates resource info from xinput output. @@ -218,6 +237,7 @@ _summary: Creates resource info from xinput output. id: environment estimated_duration: 0.11 plugin: resource +category_id: information_gathering _summary: Create resource info for environment variables command: IFS=$'\n' @@ -228,12 +248,14 @@ command: id: mobilebroadband estimated_duration: 0.38 plugin: resource +category_id: information_gathering _summary: Create resource for mobile broadband devices command: mobilebroadband_resource.sh id: virtualization estimated_duration: 0.13 plugin: resource +category_id: information_gathering requires: package.name == "cpu-checker" or executable.name == 'kvm-ok' _summary: Resource for hardware virtualization @@ -248,18 +270,20 @@ command: id: IEEE_80211 estimated_duration: 0.08 plugin: resource +category_id: information_gathering command: 80211_resource _summary: Creates resource info for wifi supported protocols/interfaces id: wireless_sta_protocol plugin: resource +category_id: information_gathering _summary: Resource job to identify Wi-Fi STA supported protocols _description: Job listing STA supported 802.11 protocols per interfaces. 
command: # shellcheck disable=SC2046 for i in $(iw dev | grep -oP 'Interface\s+\K\w+'); do iw phy phy$(iw dev "$i" info | grep -oP 'wiphy\s+\K\d+') info | grep -q 'VHT' && echo "$i""_ac: supported" || echo "$i""_ac: unsupported"; done - # MCS 10 and 11 if present support the ax only 1024-QAM + # MCS 10 and 11 if present support the ax only 1024-QAM # shellcheck disable=SC2046 for i in $(iw dev | grep -oP 'Interface\s+\K\w+'); do iw phy phy$(iw dev "$i" info | grep -oP 'wiphy\s+\K\d+') info | grep -q 'MCS 0-11' && echo "$i""_ax: supported" || echo "$i""_ax: unsupported"; done estimated_duration: 0.5 @@ -268,6 +292,7 @@ flags: preserve-locale id: rtc estimated_duration: 0.02 plugin: resource +category_id: information_gathering command: if [ -e /proc/driver/rtc ] then @@ -280,6 +305,7 @@ _summary: Creates resource info for RTC id: requirements estimated_duration: 0.01 plugin: resource +category_id: information_gathering command: if [ -f "$PLAINBOX_SESSION_SHARE"/requirements_docs.txt ];then cat "$PLAINBOX_SESSION_SHARE"/requirements_docs.txt @@ -315,12 +341,14 @@ _description: id: graphics_card estimated_duration: 0.05 plugin: resource +category_id: information_gathering _summary: Generate an entry for each graphics card present in the system. 
command: graphics_card_resource.py id: fwts estimated_duration: 0.5 plugin: resource +category_id: information_gathering requires: executable.name == "fwts" _summary: Generate an entry for each FWTS test available command: @@ -329,6 +357,7 @@ command: id: mir estimated_duration: 0.5 plugin: resource +category_id: information_gathering requires: package.name == "mir-test-tools" _summary: Generate an entry for each MIR integration tests command: @@ -338,6 +367,7 @@ command: id: wifi_interface_mode estimated_duration: 0.1 plugin: resource +category_id: information_gathering command: # shellcheck disable=SC2046 for i in $(iw dev | grep -oP 'Interface\s+\K\w+'); @@ -350,6 +380,7 @@ _summary: Create resource info for supported wifi interface modes id: snap estimated_duration: 1.1 plugin: resource +category_id: information_gathering command: unset PYTHONUSERBASE snapd_resource.py snaps @@ -359,6 +390,7 @@ _summary: Collect information about installed snap packages id: interface estimated_duration: 1.1 plugin: resource +category_id: information_gathering command: unset PYTHONUSERBASE snapd_resource.py interfaces endpoints @@ -368,6 +400,7 @@ _summary: Collect information about interfaces id: connections estimated_duration: 1.1 plugin: resource +category_id: information_gathering command: unset PYTHONUSERBASE snapd_resource.py interfaces connections @@ -379,6 +412,7 @@ _summary: Collect model assertions on the device _description: Queries the snapd REST API for model assertions present on the device. plugin: resource +category_id: information_gathering estimated_duration: 2.0 command: snapd_resource.py assertions model @@ -388,6 +422,7 @@ _summary: Collect serial assertions on the device _description: Queries the snapd REST API for serial assertions present on the device. plugin: resource +category_id: information_gathering estimated_duration: 2.0 command: snapd_resource.py assertions serial @@ -399,6 +434,7 @@ _description: relies on the user specifying the ports. 
This is to allow template jobs to then be instantiated. plugin: resource +category_id: information_gathering estimated_duration: 1.0 command: for i in $SERIAL_PORTS_STATIC; do @@ -413,6 +449,7 @@ _description: device type or owner requirements. This resource detects that state of those features so that appropriate tests can be run. plugin: resource +category_id: information_gathering estimated_duration: 1.0 command: snapd_resource.py features @@ -426,6 +463,7 @@ _description: responsible for cofiguring an individual interface allowing appropriate tests to be run. plugin: resource +category_id: information_gathering estimated_duration: 2.0 user: root command: @@ -449,6 +487,7 @@ _description: Apply a simple heuristic to determine the bootloader that is used on the device. This can help identify what boot security systems might be used. plugin: resource +category_id: information_gathering estimated_duration: 1.0 user: root command: @@ -457,6 +496,7 @@ command: id: nvdimm_resource estimated_duration: 0.25 plugin: resource +category_id: information_gathering user: root requires: package.name == 'ipmctl' or executable.name == 'ipmctl' @@ -472,6 +512,7 @@ command: id: audio_card estimated_duration: 0.05 plugin: resource +category_id: information_gathering _summary: Collect information about the audio card _description: Gets audio resource info from /proc/asound/pcm command: audio_card_resource.py @@ -479,6 +520,7 @@ command: audio_card_resource.py id: image_source_and_type estimated_duration: 0.05 plugin: resource +category_id: information_gathering _summary: Collect the source and type of image _description: Get the source and type of image. 
From 16f83c36cf31ca37131a4ce94717186527533a40 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Wed, 31 Jan 2024 12:11:51 +0100 Subject: [PATCH 016/108] Update ping test to not crash on failing pings (BugFix) (#969) * Update ping test to not crash on failing pings * partially black gateway and tests * Whitespace flake8 fix * Additional tests to hit the coverage quota * Review feedback --- providers/base/bin/gateway_ping_test.py | 130 ++++++++++++------ .../base/tests/test_gateway_ping_test.py | 89 ++++++++++++ 2 files changed, 177 insertions(+), 42 deletions(-) create mode 100644 providers/base/tests/test_gateway_ping_test.py diff --git a/providers/base/bin/gateway_ping_test.py b/providers/base/bin/gateway_ping_test.py index 59f43d31ce..1df8270300 100755 --- a/providers/base/bin/gateway_ping_test.py +++ b/providers/base/bin/gateway_ping_test.py @@ -219,41 +219,56 @@ def ping(host, interface, count, deadline, verbose=False): return ping_summary -def main(args): - gettext.textdomain("com.canonical.certification.checkbox") - gettext.bindtextdomain("com.canonical.certification.checkbox", - os.getenv("CHECKBOX_PROVIDER_LOCALE_DIR", None)) +def parse_args(argv): default_count = 2 default_delay = 4 route = Route() parser = argparse.ArgumentParser() parser.add_argument( - "host", nargs='?', default=route.get_default_gateway(), - help=_("host to ping")) + "host", + nargs="?", + default=route.get_default_gateway(), + help=_("host to ping"), + ) parser.add_argument( - "-c", "--count", default=default_count, type=int, - help=_("number of packets to send")) + "-c", + "--count", + default=default_count, + type=int, + help=_("number of packets to send"), + ) parser.add_argument( - "-d", "--deadline", default=default_delay, type=int, - help=_("timeout in seconds")) + "-d", + "--deadline", + default=default_delay, + type=int, + help=_("timeout in seconds"), + ) parser.add_argument( - "-t", "--threshold", default=0, type=int, - help=_("allowed packet loss percentage (default: 
%(default)s)")) + "-t", + "--threshold", + default=0, + type=int, + help=_("allowed packet loss percentage (default: %(default)s)"), + ) parser.add_argument( - "-v", "--verbose", action='store_true', help=_("be verbose")) + "-v", "--verbose", action="store_true", help=_("be verbose") + ) parser.add_argument( - "-I", "--interface", help=_("use specified interface to send packets")) - args = parser.parse_args() + "-I", "--interface", help=_("use specified interface to send packets") + ) + args = parser.parse_args(argv) # Ensure count and deadline make sense. Adjust them if not. if args.deadline != default_delay and args.count != default_count: # Ensure they're both consistent, and exit with a warning if not, # rather than modifying what the user explicitly set. if args.deadline <= args.count: # FIXME: this cannot ever be translated correctly - print(_( - "ERROR: not enough time for {0} pings in {1} seconds" - ).format(args.count, args.deadline)) - return 1 + raise SystemExit( + _( + "ERROR: not enough time for {0} pings in {1} seconds" + ).format(args.count, args.deadline) + ) elif args.deadline != default_delay: # Adjust count according to delay. 
args.count = args.deadline - 1 @@ -261,43 +276,74 @@ def main(args): args.count = 1 if args.verbose: # FIXME: this cannot ever be translated correctly - print(_( - "Adjusting ping count to {0} to fit in {1}-second deadline" - ).format(args.count, args.deadline)) + print( + _( + "Adjusting ping count to {0} to fit in {1}-second deadline" + ).format(args.count, args.deadline) + ) else: # Adjust delay according to count args.deadline = args.count + 1 if args.verbose: # FIXME: this cannot ever be translated correctly - print(_( - "Adjusting deadline to {0} seconds to fit {1} pings" - ).format(args.deadline, args.count)) + print( + _("Adjusting deadline to {0} seconds to fit {1} pings").format( + args.deadline, args.count + ) + ) + return args + + +def main(argv) -> int: + gettext.textdomain("com.canonical.certification.checkbox") + gettext.bindtextdomain( + "com.canonical.certification.checkbox", + os.getenv("CHECKBOX_PROVIDER_LOCALE_DIR", None), + ) + + args = parse_args(argv) + # If given host is not pingable, override with something pingable. 
host = get_host_to_ping( - interface=args.interface, verbose=args.verbose, default=args.host) + interface=args.interface, verbose=args.verbose, default=args.host + ) if args.verbose: print(_("Checking connectivity to {0}").format(host)) - ping_summary = None + if host: - ping_summary = ping(host, args.interface, args.count, - args.deadline, args.verbose) - if ping_summary is None or ping_summary['received'] == 0: + ping_summary = ping( + host, args.interface, args.count, args.deadline, args.verbose + ) + else: + ping_summary = { + "received": 0, + "cause": "Unable to find any host to ping", + } + + if ping_summary["received"] == 0: print(_("No Internet connection")) - if ping_summary.get('cause'): - print("Possible cause: {}".format(ping_summary['cause'])) + if ping_summary.get("cause"): + print("Possible cause: {}".format(ping_summary["cause"])) return 1 - elif ping_summary['transmitted'] != ping_summary['received']: - print(_("Connection established, but lost {0}% of packets").format( - ping_summary['pct_loss'])) - if ping_summary['pct_loss'] > args.threshold: - print(_( - "FAIL: {0}% packet loss is higher than {1}% threshold" - ).format(ping_summary['pct_loss'], args.threshold)) + elif ping_summary["transmitted"] != ping_summary["received"]: + print( + _("Connection established, but lost {0}% of packets").format( + ping_summary["pct_loss"] + ) + ) + if ping_summary["pct_loss"] > args.threshold: + print( + _( + "FAIL: {0}% packet loss is higher than {1}% threshold" + ).format(ping_summary["pct_loss"], args.threshold) + ) return 1 else: - print(_( - "PASS: {0}% packet loss is within {1}% threshold" - ).format(ping_summary['pct_loss'], args.threshold)) + print( + _("PASS: {0}% packet loss is within {1}% threshold").format( + ping_summary["pct_loss"], args.threshold + ) + ) return 0 else: print(_("Connection to test host fully established")) diff --git a/providers/base/tests/test_gateway_ping_test.py b/providers/base/tests/test_gateway_ping_test.py new file mode 
100644 index 0000000000..4a2a253a4a --- /dev/null +++ b/providers/base/tests/test_gateway_ping_test.py @@ -0,0 +1,89 @@ +import unittest +from unittest.mock import patch +from gateway_ping_test import main, parse_args + + +class TestMainFunction(unittest.TestCase): + @patch("gateway_ping_test.get_host_to_ping") + @patch("gateway_ping_test.ping") + def test_no_internet_connection_no_cause( + self, mock_ping, mock_get_host_to_ping + ): + mock_get_host_to_ping.return_value = None + mock_ping.return_value = None + result = main(["1.1.1.1"]) + self.assertEqual(result, 1) + + @patch("gateway_ping_test.get_host_to_ping") + @patch("gateway_ping_test.ping") + def test_no_internet_connection_cause( + self, mock_ping, mock_get_host_to_ping + ): + mock_ping.return_value = {"received": 0, "cause": "Test cause"} + result = main(["1.1.1.1"]) + self.assertEqual(result, 1) + + @patch("gateway_ping_test.get_host_to_ping") + @patch("gateway_ping_test.ping") + def test_packet_loss_within_threshold( + self, mock_ping, mock_get_host_to_ping + ): + mock_ping.return_value = { + "transmitted": 100, + "received": 95, + "pct_loss": 5, + } + result = main(["1.1.1.1", "-t", "10"]) + self.assertEqual(result, 0) + + @patch("gateway_ping_test.get_host_to_ping") + @patch("gateway_ping_test.ping") + def test_packet_loss_exceeding_threshold( + self, mock_ping, mock_get_host_to_ping + ): + mock_ping.return_value = { + "transmitted": 100, + "received": 80, + "pct_loss": 20, + } + result = main(["1.1.1.1", "-t", "10"]) + self.assertEqual(result, 1) + + @patch("gateway_ping_test.get_host_to_ping") + @patch("gateway_ping_test.ping") + def test_full_connectivity(self, mock_ping, mock_get_host_to_ping): + mock_ping.return_value = { + "transmitted": 100, + "received": 100, + "pct_loss": 0, + } + result = main(["1.1.1.1"]) + self.assertEqual(result, 0) + + @patch("gateway_ping_test.get_host_to_ping") + @patch("gateway_ping_test.ping") + def test_verbose_output(self, mock_ping, mock_get_host_to_ping): + 
mock_ping.return_value = { + "transmitted": 100, + "received": 100, + "pct_loss": 0, + } + result = main(["1.1.1.1", "-v"]) + self.assertEqual(result, 0) + + @patch("gateway_ping_test.get_host_to_ping") + @patch("gateway_ping_test.ping") + def test_invalid_arguments_count_deadline( + self, mock_ping, mock_get_host_to_ping + ): + with self.assertRaises(SystemExit): + main(["-c", "10", "-d", "8"]) + + def test_adjust_count_based_on_non_default_deadline(self): + # Assuming default_delay is 4 + args = parse_args(["-d", "1", "-v"]) + self.assertEqual( + args.count, + 1, + "Count should be adjusted based on the non-default deadline", + ) From ddb7aa82cef972d6df0b4ff0bcb16d67fd40302a Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Wed, 31 Jan 2024 16:15:06 +0100 Subject: [PATCH 017/108] Fix crash on missing plainbox-provider-develop (BugFix) (#974) * Fix crash on missing plainbox-provider-develop * Avoid crashing on none providerpath * Add tests for invoked * Apply suggestions from code review Co-authored-by: Fernando Bravo <39527354+fernando79513@users.noreply.github.com> * Split and create an independent test Minor: Fixed duplicated code * Pure coverage change: trigger the if eq --------- Co-authored-by: Fernando Bravo <39527354+fernando79513@users.noreply.github.com> --- checkbox-ng/plainbox/provider_manager.py | 17 +++++-- checkbox-ng/plainbox/test_provider_manager.py | 46 +++++++++++++++++-- 2 files changed, 55 insertions(+), 8 deletions(-) diff --git a/checkbox-ng/plainbox/provider_manager.py b/checkbox-ng/plainbox/provider_manager.py index a4c5860169..5f4792549c 100644 --- a/checkbox-ng/plainbox/provider_manager.py +++ b/checkbox-ng/plainbox/provider_manager.py @@ -551,11 +551,18 @@ def register_parser(self, subparsers): def invoked(self, ns): pp_env = os.getenv("PROVIDERPATH") - if pp_env and not os.path.samefile(pp_env, ns.directory): - _logger.warning( - "$PROVIDERPATH is defined, ignoring -d/--directory" - " and developing in: %s", pp_env - ) + # If 
$PROVIDERPATH is defined, use it instead of the one on the + # namespace + if pp_env: + try: + samefile = os.path.samefile(pp_env, ns.directory) + except FileNotFoundError: + samefile = False + if not samefile: + _logger.warning( + "$PROVIDERPATH is defined, ignoring -d/--directory" + " and developing in: %s", pp_env + ) ns.directory = pp_env pathname = os.path.join( ns.directory, "{}.provider".format( diff --git a/checkbox-ng/plainbox/test_provider_manager.py b/checkbox-ng/plainbox/test_provider_manager.py index 6bf912c42e..8dd27663f0 100644 --- a/checkbox-ng/plainbox/test_provider_manager.py +++ b/checkbox-ng/plainbox/test_provider_manager.py @@ -269,13 +269,16 @@ def test_sdist__partial(self): self.assert_common_sdist(tarball) @mock.patch('plainbox.impl.providers.v1.get_universal_PROVIDERPATH_entry') - def test_develop(self, mock_path_entry): + @mock.patch('os.getenv') + def test_develop(self, mock_getenv, mock_path_entry): """ verify that ``develop`` creates a provider file """ provider_path = os.path.join(self.tmpdir, "checkbox-providers-develop") filename = os.path.join( provider_path, "com.example.test.provider") + # no PROVIDERPATH defined + mock_getenv.return_value = provider_path mock_path_entry.return_value = provider_path content = ( "[PlainBox Provider]\n" @@ -288,12 +291,47 @@ def test_develop(self, mock_path_entry): self.tool.main(["develop"]) self.assertFileContent(filename, content) + @mock.patch("plainbox.impl.providers.v1.get_universal_PROVIDERPATH_entry") + @mock.patch("os.getenv") + @mock.patch("os.path.samefile") + def test_develop_provider_path( + self, mock_samefile, mock_getenv, mock_path_entry + ): + """ + verify that ``develop`` creates a provider file + """ + provider_path = os.path.join(self.tmpdir, "checkbox-providers-develop") + filename = os.path.join(provider_path, "com.example.test.provider") + # PROVIDERPATH defined + mock_getenv.return_value = provider_path + mock_samefile.side_effect = FileNotFoundError + 
mock_path_entry.return_value = provider_path + content = textwrap.dedent( + """ + [PlainBox Provider] + description = description + gettext_domain = domain + location = {} + name = com.example:test + version = 1.0 + + """.format( + self.tmpdir + ) + ).lstrip() + + self.tool.main(["develop"]) + self.assertFileContent(filename, content) + @mock.patch('plainbox.impl.providers.v1.get_universal_PROVIDERPATH_entry') - def test_develop__force(self, mock_path_entry): + @mock.patch('os.getenv') + def test_develop__force(self, mock_getenv, mock_path_entry): """ verify that ``develop --force`` overwrites existing .provider file """ + # no PROVIDERPATH defined + mock_getenv.return_value = None # support running test from venv provider_path = os.path.join(self.tmpdir, "checkbox-providers-develop") filename = os.path.join( provider_path, "com.example.test.provider") @@ -313,10 +351,12 @@ def test_develop__force(self, mock_path_entry): self.assertFileContent(filename, content) @mock.patch('plainbox.impl.providers.v1.get_universal_PROVIDERPATH_entry') - def test_develop__uninstall(self, mock_path_entry): + @mock.patch('os.getenv') + def test_develop__uninstall(self, mock_getenv, mock_path_entry): """ verify that ``develop --uninstall`` works """ + mock_getenv.return_value = None # support running test from venv provider_path = os.path.join(self.tmpdir, "checkbox-providers-develop") filename = os.path.join( provider_path, "com.example.test.provider") From 51b4c91d1217bd713b5feffea0d0efff51ed96f3 Mon Sep 17 00:00:00 2001 From: Pei Yao-Chang Date: Thu, 1 Feb 2024 16:36:12 +0800 Subject: [PATCH 018/108] Support user defined snaps from config_var to fix issue 923 (BugFix) (#953) * Support SNAP_CONFINEMENT_ALLOWLIST and refactor * Fix issue 923 and refactor original logic --- providers/base/bin/snap_confinement_test.py | 188 +++++++++----- .../base/tests/test_snap_confinement_test.py | 231 ++++++++++++++++++ 2 files changed, 360 insertions(+), 59 deletions(-) create mode 100644 
providers/base/tests/test_snap_confinement_test.py diff --git a/providers/base/bin/snap_confinement_test.py b/providers/base/bin/snap_confinement_test.py index 4211864b86..7d1a09cfd8 100755 --- a/providers/base/bin/snap_confinement_test.py +++ b/providers/base/bin/snap_confinement_test.py @@ -1,9 +1,10 @@ #!/usr/bin/env python3 -# Copyright 2021 Canonical Ltd. +# Copyright 2021 - 2024 Canonical Ltd. # All rights reserved. # # Written by: # Patrick Liu +# Patrick Chang # # Checkbox is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, @@ -20,6 +21,7 @@ import argparse import json import logging +import os import re import sys from checkbox_support.snap_utils.snapd import Snapd @@ -95,86 +97,154 @@ def test_system_confinement(): return sandbox_features_output -def test_snaps_confinement(): +class SnapsConfinementVerifier: """ Test the confinement status of all installed snaps. - A snap confinement should be 'strict', devmode should be False, - and should not have a sideloaded revision starts with 'x'. + A snap's confinement should be 'strict', devmode should be False, + and it should not have a sideloaded revision that starts with 'x'. - Variables: - allowlist_snaps (list): A list of snap names or regex patterns - that are exempted from the confinement check. To match the - entire snap name, use the pattern "^$". For - example, "bugit" matches only "bugit". To match multiple - snaps with similar names, use ".*" suffixes. For instance, - "checkbox.*" matches all snap names starting with "checkbox". - Customize this list to exclude specific snaps from the - confinement checks based on their names or patterns. - Returns: - int: Exit code. 0 if the test passes for all snaps, - otherwise 1. 
- """ - allowlist_snaps = [ - r"^bugit$", - r"checkbox.*", - r"^mir-test-tools$", - r"^graphics-test-tools$", - ] + Attributes: + _official_allowlist (list): A list of regex patterns exempted from + the confinement check. Snaps matching these patterns will be + excluded from the test. Customize this list to define specific + snaps that should be ignored based on their names or patterns. + + _allowlist_from_config_var (list): A list of snap names + from the SNAP_CONFINEMENT_ALLOWLIST environment variable. + Snaps matching these patterns will be excluded from the test. + + _desired_attributes (list): A list of attributes of a snap that + are of interest for testing. The attributes include "name", + "confinement", "devmode", and "revision". - data = Snapd().list() - exit_code = 0 - for snap in data: - snap_name = snap.get("name") - snap_confinement = snap.get("confinement") - snap_devmode = snap.get("devmode") - snap_revision = snap.get("revision") + Methods: + _is_snap_in_allow_list(snap_name: str) -> bool: + Check if a snap is in the allowlist. - if snap_name is None: - logging.error("Snap 'name' not found in the snap data.") - exit_code = 1 - continue # Skipping following checks if snap_name not found + _is_snap_confinement_not_strict(snap_confinement: str) -> bool: + Check if a snap's confinement is not 'strict'. - if any( + _is_snap_devmode(snap_devmode: bool) -> bool: + Check if a snap is in devmode. + + _is_snap_sideloaded_revision(snap_revision: str) -> bool: + Check if a snap has a sideloaded revision starting with 'x'. + + _extract_attributes_from_snap(target_snap: dict) -> (bool, dict): + Extract desired attributes from a snap's information. + + verify_snap() -> int: + Perform the snap verification test on all installed snaps. + Returns the exit code: 0 if the test passes for all snaps, + otherwise 1. 
+ """ + def __init__(self) -> None: + self._official_allowlist = [ + r"^bugit$", + r"checkbox.*", + r"^mir-test-tools$", + r"^graphics-test-tools$" + ] + self._allowlist_from_config_var = [ + element.strip() for element in os.environ.get( + "SNAP_CONFINEMENT_ALLOWLIST", "").split(",")] + # Define the attributes of snap we are interested in. + self._desired_attributes = [ + "name", "confinement", "devmode", "revision"] + + def _is_snap_in_allow_list(self, snap_name: str) -> bool: + if snap_name in self._allowlist_from_config_var: + logging.warning( + "This snap is included in the SNAP_CONFINEMENT_ALLOWLIST" + " environment variable, a tester defined checkbox config_var.") + logging.info('Result: Skip') + return True + elif any( re.match(pattern, snap_name) - for pattern in allowlist_snaps + for pattern in self._official_allowlist ): - print("Skipping whitelisted snap: {}".format(snap_name)) - continue + logging.warning( + "This snap is officially defined in the allowlist") + logging.info('Result: Skip') + return True + return False + def _is_snap_confinement_not_strict(self, snap_confinement: str) -> bool: if snap_confinement != "strict": - exit_code = 1 logging.error( - "Snap '%s' confinement is expected to be 'strict' " - "but got '%s'", snap_name, snap_confinement, - ) + "confinement is expected to be 'strict' but got '{}'".format( + snap_confinement)) + return True + return False - if snap_devmode is not False: - exit_code = 1 - logging.error( - "Snap '%s' devmode is expected to be False but " - "got '%s'", snap_name, snap_devmode, - ) + def _is_snap_devmode(self, snap_devmode: bool) -> bool: + if snap_devmode: + logging.error("devmode is expected to be 'False' but got 'True'") + return True + return False + def _is_snap_sideloaded_revision(self, snap_revision: str) -> bool: if snap_revision and snap_revision.startswith("x"): - exit_code = 1 - logging.error( - "Snap '%s' has sideloaded revision '%s', which " - "is not allowed", snap_name, snap_revision, - ) - 
elif snap_revision is None: - exit_code = 1 logging.error( - "'revision' not found in snap '%s'", snap_name, + "sideloaded revision is '{}', which is not allowed".format( + snap_revision)) + return True + return False + + def _extract_attributes_from_snap(self, target_snap: dict) -> (bool, dict): + return_dict = {} + has_error = False + for attr in self._desired_attributes: + value = target_snap.get(attr) + if value is None: + has_error = True + logging.error( + "Snap '{}' not found in the snap data.".format(attr)) + continue + return_dict.update({attr: value}) + return has_error, return_dict + + def verify_snap(self) -> bool: + exit_code = 0 + snaps_information = Snapd().list() + for snap_info in snaps_information: + tmp_exit_code = 0 + has_error, snap_dict = self._extract_attributes_from_snap( + target_snap=snap_info ) - return exit_code + if has_error: + # Mark as fail and skip current snap's checking + # if any desired attribute is missing + exit_code = 1 + continue + + logging.info( + "=== Checking Snap: {} ===".format(snap_dict.get("name"))) + + # Skip if target snap in allow list + if self._is_snap_in_allow_list(snap_dict.get("name")): + continue + + tmp_exit_code |= self._is_snap_confinement_not_strict( + snap_dict.get("confinement")) + tmp_exit_code |= self._is_snap_devmode(snap_dict.get("devmode")) + tmp_exit_code |= self._is_snap_sideloaded_revision( + snap_dict.get("revision")) + + logging.info( + "Result: {}".format("Fail" if tmp_exit_code else "Pass")) + + exit_code |= tmp_exit_code + return exit_code def main(): - logging.basicConfig(format='%(levelname)s: %(message)s') + logging.basicConfig( + format='%(levelname)s: %(message)s', level=logging.INFO) sub_commands = { "system": test_system_confinement, - "snaps": test_snaps_confinement, + "snaps": SnapsConfinementVerifier().verify_snap, } parser = argparse.ArgumentParser() parser.add_argument("subcommand", type=str, choices=sub_commands) diff --git a/providers/base/tests/test_snap_confinement_test.py 
b/providers/base/tests/test_snap_confinement_test.py new file mode 100644 index 0000000000..8290f7a7c8 --- /dev/null +++ b/providers/base/tests/test_snap_confinement_test.py @@ -0,0 +1,231 @@ +import logging +import unittest +from unittest.mock import patch, MagicMock +from snap_confinement_test import SnapsConfinementVerifier, main + + +class TestSnapsConfinementVerifier(unittest.TestCase): + @classmethod + def setUpClass(cls): + logging.disable(logging.CRITICAL) + + @classmethod + def tearDownClass(cls): + logging.disable(logging.NOTSET) + + def setUp(self): + self.verifier = SnapsConfinementVerifier() + + def test_snap_in_allow_list_from_config_var(self): + with patch.dict( + 'os.environ', {"SNAP_CONFINEMENT_ALLOWLIST": "sp1, sp2"} + ): + verifier = SnapsConfinementVerifier() + result = verifier._is_snap_in_allow_list("sp2") + self.assertTrue(result) + + def test_snap_in_official_allow_list(self): + result = self.verifier._is_snap_in_allow_list("bugit") + self.assertTrue(result) + + def test_snap_not_in_allow_list(self): + result = self.verifier._is_snap_in_allow_list("non_allowed_snap") + self.assertFalse(result) + + def test_is_snap_confinement_not_strict_catchs_none_strict_snap(self): + result = self.verifier._is_snap_confinement_not_strict("classic") + self.assertTrue(result) + + def test_is_snap_confinement_not_strict_success(self): + result = self.verifier._is_snap_confinement_not_strict("strict") + self.assertFalse(result) + + def test_is_snap_devmode_catchs_devmode_snap(self): + result = self.verifier._is_snap_devmode(True) + self.assertTrue(result) + + def test_is_snap_devmode_success(self): + result = self.verifier._is_snap_devmode(False) + self.assertFalse(result) + + def test_is_snap_sideloaded_revision_catchs_sideload_snap(self): + result = self.verifier._is_snap_sideloaded_revision("x123") + self.assertTrue(result) + + def test_is_snap_sideloaded_revision_success(self): + result = self.verifier._is_snap_sideloaded_revision("y456") + 
self.assertFalse(result) + + def test_extract_attributes_from_snap_success(self): + self.verifier._desired_attributes = ["hello"] + mock_snap = {"hello": "world", "foo": "bar"} + result = self.verifier._extract_attributes_from_snap(mock_snap) + self.assertEqual(result, (False, {"hello": "world"})) + + def test_extract_attributes_from_snap_missing_desired_attribute(self): + self.verifier._desired_attributes = ["name", "nonexistent_attr"] + mock_snap = {"name": "test_snap"} + result = self.verifier._extract_attributes_from_snap(mock_snap) + self.assertEqual(result, (True, {"name": "test_snap"})) + + @patch("snap_confinement_test.Snapd.list") + def test_verify_snap_no_snaps_from_snapd_list(self, mock_snapd_list): + mock_snapd_list.return_value = [] + self.assertEqual(0, self.verifier.verify_snap()) + + @patch("snap_confinement_test.SnapsConfinementVerifier._extract_attributes_from_snap") # noqa E501 + @patch("snap_confinement_test.Snapd.list") + def test_verify_snap_fail_without_desired_attribute_in_a_snap( + self, + mock_snapd_list, + mock_extract_attributes_from_snap + ): + mock_snapd_list.return_value = [{"foo": "bar"}] + mock_extract_attributes_from_snap.return_value = (True, {}) + self.assertEqual(1, self.verifier.verify_snap()) + + @patch("snap_confinement_test.SnapsConfinementVerifier._is_snap_in_allow_list") # noqa E501 + @patch("snap_confinement_test.SnapsConfinementVerifier._extract_attributes_from_snap") # noqa E501 + @patch("snap_confinement_test.Snapd.list") + def test_verify_snap_pass_if_snap_in_allow_list( + self, + mock_snapd_list, + mock_extract_attributes_from_snap, + mock_is_snap_in_allow_list + ): + snap_info = {"name": "foo"} + mock_snapd_list.return_value = [snap_info] + mock_extract_attributes_from_snap.return_value = (False, snap_info) + mock_is_snap_in_allow_list.return_value = True + result = self.verifier.verify_snap() + mock_is_snap_in_allow_list.assert_called_once_with( + snap_info.get("name")) + self.assertEqual(0, result) + + 
@patch("snap_confinement_test.SnapsConfinementVerifier._is_snap_sideloaded_revision") # noqa E501 + @patch("snap_confinement_test.SnapsConfinementVerifier._is_snap_devmode") # noqa E501 + @patch("snap_confinement_test.SnapsConfinementVerifier._is_snap_confinement_not_strict") # noqa E501 + @patch("snap_confinement_test.SnapsConfinementVerifier._is_snap_in_allow_list") # noqa E501 + @patch("snap_confinement_test.SnapsConfinementVerifier._extract_attributes_from_snap") # noqa E501 + @patch("snap_confinement_test.Snapd.list") + def test_verify_snap_success( + self, + mock_snapd_list, + mock_extract_attributes_from_snap, + mock_is_snap_in_allow_list, + mock_is_snap_confinement_not_strict, + mock_is_snap_devmode, + mock_is_snap_sideloaded_revision, + ): + """ + Check verify_snap return 0 if a snap mach all the check criteria + """ + snap_info = { + "name": "foo-snap", + "devmode": False, + "confinement": "strict", + "revision": "999" + } + mock_snapd_list.return_value = [snap_info] + mock_extract_attributes_from_snap.return_value = (False, snap_info) + mock_is_snap_in_allow_list.return_value = False + mock_is_snap_confinement_not_strict.return_value = False + mock_is_snap_devmode.return_value = False + mock_is_snap_sideloaded_revision.return_value = False + + result = self.verifier.verify_snap() + mock_extract_attributes_from_snap.assert_called_once_with( + target_snap=snap_info + ) + mock_is_snap_in_allow_list.assert_called_once_with( + snap_info.get("name")) + mock_is_snap_confinement_not_strict.assert_called_once_with( + snap_info.get("confinement")) + mock_is_snap_devmode.assert_called_once_with( + snap_info.get("devmode")) + mock_is_snap_sideloaded_revision.assert_called_once_with( + snap_info.get("revision")) + self.assertEqual(0, result) + + @patch("snap_confinement_test.SnapsConfinementVerifier._is_snap_sideloaded_revision") # noqa E501 + @patch("snap_confinement_test.SnapsConfinementVerifier._is_snap_devmode") # noqa E501 + 
@patch("snap_confinement_test.SnapsConfinementVerifier._is_snap_confinement_not_strict") # noqa E501 + @patch("snap_confinement_test.SnapsConfinementVerifier._is_snap_in_allow_list") # noqa E501 + @patch("snap_confinement_test.SnapsConfinementVerifier._extract_attributes_from_snap") # noqa E501 + @patch("snap_confinement_test.Snapd.list") + def test_verify_snap_fail_to_match_pass_criteria( + self, + mock_snapd_list, + mock_extract_attributes_from_snap, + mock_is_snap_in_allow_list, + mock_is_snap_confinement_not_strict, + mock_is_snap_devmode, + mock_is_snap_sideloaded_revision, + ): + """ + Check verify_snap return 1 if a snap doesn't reach out the check + criteria + """ + snap_info = { + "name": "foo-snap", + "devmode": True, + "confinement": "not-strict", + "revision": "999" + } + mock_snapd_list.return_value = [snap_info] + mock_extract_attributes_from_snap.return_value = (False, snap_info) + mock_is_snap_in_allow_list.return_value = False + mock_is_snap_confinement_not_strict.return_value = True + mock_is_snap_devmode.return_value = True + mock_is_snap_sideloaded_revision.return_value = False + result = self.verifier.verify_snap() + mock_extract_attributes_from_snap.assert_called_once_with( + target_snap=snap_info + ) + mock_is_snap_in_allow_list.assert_called_once_with( + snap_info.get("name")) + mock_is_snap_confinement_not_strict.assert_called_once_with( + snap_info.get("confinement")) + mock_is_snap_devmode.assert_called_once_with( + snap_info.get("devmode")) + mock_is_snap_sideloaded_revision.assert_called_once_with( + snap_info.get("revision")) + self.assertEqual(1, result) + + +class TestMainFunction(unittest.TestCase): + @patch('snap_confinement_test.test_system_confinement') + @patch('snap_confinement_test.SnapsConfinementVerifier.verify_snap') + @patch('snap_confinement_test.argparse.ArgumentParser') + def test_main_execute_snaps_command( + self, + mock_arg_parser, + mock_verify_snap, + mock_test_system_confinement + ): + mock_args = 
MagicMock(subcommand='snaps') + mock_arg_parser.return_value.parse_args.return_value = mock_args + result = main() + mock_verify_snap.assert_called_once_with() + mock_test_system_confinement.assert_not_called() + self.assertEqual(result, mock_verify_snap.return_value) + + @patch('snap_confinement_test.test_system_confinement') + @patch('snap_confinement_test.SnapsConfinementVerifier.verify_snap') + @patch('snap_confinement_test.argparse.ArgumentParser') + def test_main_execute_system_command( + self, + mock_arg_parser, + mock_verify_snap, + mock_test_system_confinement + ): + mock_args = MagicMock(subcommand='system') + mock_arg_parser.return_value.parse_args.return_value = mock_args + result = main() + mock_test_system_confinement.assert_called_once_with() + mock_verify_snap.assert_not_called() + self.assertEqual(result, mock_test_system_confinement.return_value) + + +if __name__ == '__main__': + unittest.main() From 9f1a491d0c2cbbc00cdf84141e9e78360958a8ad Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Thu, 1 Feb 2024 14:12:26 +0100 Subject: [PATCH 019/108] Update many missing categories (BugFix) (#975) Update many missing categories --- .../plainbox/impl/providers/manifest/units/manifest.pxu | 4 +++- providers/base/units/acpi/jobs.pxu | 1 + providers/base/units/cpu/jobs.pxu | 2 ++ providers/base/units/cpu/resource.pxu | 1 + providers/base/units/info/jobs.pxu | 3 ++- providers/base/units/install/jobs.pxu | 2 +- providers/base/units/suspend/suspend.pxu | 1 + providers/base/units/zapper/resource.pxu | 1 + providers/iiotg/units/usb-dwc3/jobs.pxu | 1 + 9 files changed, 13 insertions(+), 3 deletions(-) diff --git a/checkbox-ng/plainbox/impl/providers/manifest/units/manifest.pxu b/checkbox-ng/plainbox/impl/providers/manifest/units/manifest.pxu index f96960c4b4..5e6549c50a 100644 --- a/checkbox-ng/plainbox/impl/providers/manifest/units/manifest.pxu +++ b/checkbox-ng/plainbox/impl/providers/manifest/units/manifest.pxu @@ -1,5 +1,6 @@ unit: job id: collect-manifest 
+category_id: com.canonical.plainbox::info _summary: Collect the hardware manifest (interactively) _description: This job interactively asks the user about each manifest entry and stores the @@ -12,10 +13,11 @@ flags: preserve-locale use-chunked-io unit: job id: manifest +category_id: com.canonical.plainbox::info _summary: Hardware Manifest _description: This job loads the hardware manifest and exposes it as a resource. plugin: resource -command: plainbox-manifest-resource +command: plainbox-manifest-resource estimated_duration: 1 flags: preserve-locale diff --git a/providers/base/units/acpi/jobs.pxu b/providers/base/units/acpi/jobs.pxu index e8c288f864..9578bc9512 100644 --- a/providers/base/units/acpi/jobs.pxu +++ b/providers/base/units/acpi/jobs.pxu @@ -3,6 +3,7 @@ _summary: test ACPI OEM _OSI strings _description: This checks if the depreciated OEM _OSI strings are still used by checking the ACPI DSDT and SSDT tables plugin: shell +category_id: acpi requires: cpuinfo.platform in ("i386", "x86_64") user: root diff --git a/providers/base/units/cpu/jobs.pxu b/providers/base/units/cpu/jobs.pxu index 02fd3409b1..1106d68c6f 100644 --- a/providers/base/units/cpu/jobs.pxu +++ b/providers/base/units/cpu/jobs.pxu @@ -209,6 +209,7 @@ requires: executable.name == 'fwts' cpuinfo.platform not in ("aarch64", "armv7l", "s390x") user: root +category_id: com.canonical.plainbox::cpu _summary: Run C-States tests _description: @@ -224,6 +225,7 @@ plugin:shell id: after-suspend-cpu/cstates depends: com.canonical.certification::suspend/suspend_advanced_auto estimated_duration: 10.0 +category_id: com.canonical.plainbox::cpu requires: executable.name == 'fwts' cpuinfo.platform not in ("aarch64", "armv7l", "s390x") diff --git a/providers/base/units/cpu/resource.pxu b/providers/base/units/cpu/resource.pxu index 9a4603f2fb..96161d0ede 100644 --- a/providers/base/units/cpu/resource.pxu +++ b/providers/base/units/cpu/resource.pxu @@ -1,6 +1,7 @@ id: cpu_offlining estimated_duration: 0.02 
plugin: resource +category_id: com.canonical.plainbox::cpu command: if ls /sys/devices/system/cpu/*/online >& /dev/null then diff --git a/providers/base/units/info/jobs.pxu b/providers/base/units/info/jobs.pxu index 7f2b9dbfe3..730a257349 100644 --- a/providers/base/units/info/jobs.pxu +++ b/providers/base/units/info/jobs.pxu @@ -199,6 +199,7 @@ _description: Attaches the output of udev_resource.py, for debugging purposes id: lsblk_attachment estimated_duration: 0.1 plugin: attachment +category_id: com.canonical.plainbox::info command: lsblk -i -n -P -o KNAME,TYPE,MOUNTPOINT requires: executable.name == 'lsblk' @@ -465,7 +466,7 @@ command: id: lstopo_verbose_attachment plugin: attachment category_id: com.canonical.plainbox::info -command: lstopo -v +command: lstopo -v estimated_duration: 0.015 requires: executable.name == 'lstopo' _description: Attaches the system topology as presented by the lstopo command diff --git a/providers/base/units/install/jobs.pxu b/providers/base/units/install/jobs.pxu index ae221036e5..c6a1420798 100644 --- a/providers/base/units/install/jobs.pxu +++ b/providers/base/units/install/jobs.pxu @@ -1,4 +1,5 @@ plugin: shell +category_id: com.canonical.plainbox::miscellanea id: install/apt-get-gets-updates requires: package.name == 'apt' user: root @@ -7,4 +8,3 @@ _description: Tests to see that apt can access repositories and get updates (does not install updates). This is done to confirm that you could recover from an incomplete or broken update. 
- diff --git a/providers/base/units/suspend/suspend.pxu b/providers/base/units/suspend/suspend.pxu index b3c9cff1cd..ad7e466cf5 100644 --- a/providers/base/units/suspend/suspend.pxu +++ b/providers/base/units/suspend/suspend.pxu @@ -2201,6 +2201,7 @@ _description: id: suspend/oops_after_suspend depends: suspend/suspend_advanced_auto plugin:shell +category_id: com.canonical.plainbox::suspend estimated_duration: 10.0 requires: executable.name == 'fwts' user: root diff --git a/providers/base/units/zapper/resource.pxu b/providers/base/units/zapper/resource.pxu index fcfe8a8052..a9a1231d21 100644 --- a/providers/base/units/zapper/resource.pxu +++ b/providers/base/units/zapper/resource.pxu @@ -1,5 +1,6 @@ id: zapper_capabilities plugin: resource +category_id: com.canonical.plainbox::info _summary: Get Zapper's setup capabilities _description: Connect to Zapper and list functions that the current setup (DUT + Zapper) are diff --git a/providers/iiotg/units/usb-dwc3/jobs.pxu b/providers/iiotg/units/usb-dwc3/jobs.pxu index 272cb18576..1898608aa5 100644 --- a/providers/iiotg/units/usb-dwc3/jobs.pxu +++ b/providers/iiotg/units/usb-dwc3/jobs.pxu @@ -62,6 +62,7 @@ flags: preserve-locale id: usb-dwc3/mass-storage-cleanup _summary: Cleanup mass storage setup after mass storage device test plugin: shell +category_id: usb-dwc3 after: usb-dwc3/mass-storage command: echo "Removing g_mass_storage module..." From 329c0465a4059465abe5d8f30218df2dc620ef0b Mon Sep 17 00:00:00 2001 From: Remy MARTIN Date: Mon, 5 Feb 2024 06:18:21 +0100 Subject: [PATCH 020/108] Improve suspend/suspend_advanced* tests #857 (BugFix) (#979) Improve suspend/suspend_advanced* tests #857 rtwake can sometimes fail to set the alarm. While running the command in background before suspending the system, in such a case the whole test plan will hang. This patch change the mode to allow the command to return immediately, while still setting the alarm, which allow to serialize its result. 
--- providers/base/units/suspend/suspend.pxu | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/providers/base/units/suspend/suspend.pxu b/providers/base/units/suspend/suspend.pxu index ad7e466cf5..83b8642b2f 100644 --- a/providers/base/units/suspend/suspend.pxu +++ b/providers/base/units/suspend/suspend.pxu @@ -177,13 +177,12 @@ command: echo "Calling fwts" set -o pipefail; checkbox-support-fwts_test -f none -l "$PLAINBOX_SESSION_SHARE"/suspend_single.log -s s3 --s3-sleep-delay=30 --s3-device-check --s3-device-check-delay=45 | tee "$PLAINBOX_SESSION_SHARE"/suspend_single_times.log else - echo "Calling rtcwake" if [ -z "$RTC_DEVICE_FILE" ]; then - rtcwake -m on -s 30 & - systemctl suspend + echo "Calling rtcwake" + rtcwake -m no -s 30 && systemctl suspend || exit 1 else - rtcwake -d "$RTC_DEVICE_FILE" -m on -s 30 & - systemctl suspend + echo "Calling rtcwake with -d $RTC_DEVICE_FILE" + rtcwake -d "$RTC_DEVICE_FILE" -m no -s 30 && systemctl suspend || exit 1 fi fi else @@ -262,14 +261,13 @@ command: echo "Calling fwts" set -o pipefail; checkbox-support-fwts_test -f none -l "$PLAINBOX_SESSION_SHARE"/suspend_single.log -s s3 --s3-sleep-delay=30 --s3-device-check --s3-device-check-delay=45 | tee "$PLAINBOX_SESSION_SHARE"/suspend_single_times.log else - echo "Calling rtcwake" - if [ -z "$RTC_DEVICE_FILE" ]; then - rtcwake -m on -s 30 & - systemctl suspend - else - rtcwake -d "$RTC_DEVICE_FILE" -m on -s 30 & - systemctl suspend - fi + if [ -z "$RTC_DEVICE_FILE" ]; then + echo "Calling rtcwake" + rtcwake -m no -s 30 && systemctl suspend || exit 1 + else + echo "Calling rtcwake with -d $RTC_DEVICE_FILE" + rtcwake -d "$RTC_DEVICE_FILE" -m no -s 30 && systemctl suspend || exit 1 + fi fi estimated_duration: 90.000 From aa7bdd3e1d724e609b5696a85003dabb10fe0f3e Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Wed, 7 Feb 2024 15:30:56 +0100 Subject: [PATCH 021/108] Fix and refactor gateway_ping_test (Bugfix) (#981) * Fix and refactor 
gateway_ping_test init * flake8 complaint * Test all utility functions * Test gateway ping reachability functions * Test also top level test gateway ping test * Tests for broadcast and netwrokctl * Also test from_ip methods * Refactor ping and update tests * Python3.5 compatibility * Update all docstring * Updated year and removed pointless param --- providers/base/bin/gateway_ping_test.py | 445 +++++++++++----- .../base/tests/test_gateway_ping_test.py | 499 +++++++++++++++++- 2 files changed, 805 insertions(+), 139 deletions(-) diff --git a/providers/base/bin/gateway_ping_test.py b/providers/base/bin/gateway_ping_test.py index 1df8270300..33bcc64cf6 100755 --- a/providers/base/bin/gateway_ping_test.py +++ b/providers/base/bin/gateway_ping_test.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # This file is part of Checkbox. # -# Copyright 2007-2014 Canonical Ltd. +# Copyright 2007-2024 Canonical Ltd. # Written by: # Brendan Donegan # Daniel Manrique @@ -11,6 +11,7 @@ # Marc Tardif # Mathieu Trudel-Lapierre # Zygmunt Krynicki +# Massimiliano Girardi # # Checkbox is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, @@ -26,7 +27,6 @@ from gettext import gettext as _ import argparse -import errno import gettext import logging import os @@ -37,18 +37,46 @@ import sys import time +from contextlib import suppress + class Route: """ Gets routing information from the system. 
""" + def __init__(self, interface): + self.interface = interface + def _num_to_dotted_quad(self, number): """ Convert long int to dotted quad string """ return socket.inet_ntoa(struct.pack("\w+)\s+00000000\s+" - r"(?P[\w]+)\s+") - w = h.search(route) - if w: - if w.group("def_gateway"): - return self._num_to_dotted_quad( - int(w.group("def_gateway"), 16)) - else: - logging.error( - _("Could not find def gateway info in /proc")) - return None - else: - logging.error(_("Could not find def gateway info in /proc")) - return None + + proc_table_re = re.compile( + r"\n(?P\w+)\s+00000000\s+" r"(?P[\w]+)\s+" + ) + proc_table_lines = proc_table_re.finditer(route) + for proc_table_line in proc_table_lines: + def_gateway = proc_table_line.group("def_gateway") + interface = proc_table_line.group("interface") + if interface == self.interface and def_gateway: + return self._num_to_dotted_quad(int(def_gateway, 16)) + logging.error(_("Could not find def gateway info in /proc")) + return None def _get_default_gateway_from_bin_route(self): """ @@ -83,151 +108,297 @@ def _get_default_gateway_from_bin_route(self): and is only used if could not get that from /proc """ logging.debug( - _("Reading default gateway information from route binary")) - routebin = subprocess.getstatusoutput( - "export LANGUAGE=C; " "/usr/bin/env route -n") - if routebin[0] == 0: - h = re.compile(r"\n0.0.0.0\s+(?P[\w.]+)\s+") - w = h.search(routebin[1]) - if w: - def_gateway = w.group("def_gateway") - if def_gateway: - return def_gateway + _("Reading default gateway information from route binary") + ) + try: + routebin = subprocess.check_output( + ["/usr/bin/env", "route", "-n"], + env={"LANGUAGE": "C"}, + universal_newlines=True, + ) + except subprocess.CalledProcessError: + return None + route_line_re = re.compile( + r"^0\.0\.0\.0\s+(?P[\w.]+)(?P.+)", + flags=re.MULTILINE, + ) + route_lines = route_line_re.finditer(routebin) + for route_line in route_lines: + def_gateway = route_line.group("def_gateway") + 
interface = route_line.group("tail").rsplit(" ", 1)[-1] + if interface == self.interface and def_gateway: + return def_gateway logging.error(_("Could not find default gateway by running route")) return None - def get_hostname(self): - return socket.gethostname() - - def get_default_gateway(self): - t1 = self._get_default_gateway_from_proc() - if not t1: - t1 = self._get_default_gateway_from_bin_route() - return t1 - - -def get_host_to_ping(interface=None, verbose=False, default=None): - # Get list of all IPs from all my interfaces, - interface_list = subprocess.check_output(["ip", "-o", 'addr', 'show']) - reg = re.compile(r'\d: (?P\w+) +inet (?P
[\d\.]+)/' - r'(?P[\d]+) brd (?P[\d\.]+)') - # Will magically exclude lo because it lacks brd field - interfaces = reg.findall(interface_list.decode()) - # ping -b the network on each one (one ping only) - # exclude the ones not specified in iface - for iface in interfaces: - if not interface or iface[0] == interface: - # Use check_output even if I'll discard the output - # looks cleaner than using .call and redirecting stdout to null - try: - subprocess.check_output(["ping", "-q", "-c", "1", "-b", - iface[3]], stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - pass - # If default host given, ping it as well, - # to try to get it into the arp table. - # Needed in case it's not responding to broadcasts. - if default: + def _get_ip_addr_info(self): + return subprocess.check_output( + ["ip", "-o", "addr", "show"], universal_newlines=True + ) + + def get_broadcast(self): + # Get list of all IPs from all my interfaces, + ip_addr_infos = self._get_ip_addr_info() + for addr_info_line in ip_addr_infos.splitlines(): + # id: if_name inet addr/mask brd broadcast + addr_info_fields = addr_info_line.split() + with suppress(IndexError): + if ( + addr_info_fields[1] == self.interface + and addr_info_fields[4] == "brd" + ): + return addr_info_fields[5] + raise ValueError( + "Unable to determine broadcast for iface {}".format(self.interface) + ) + + def _get_default_gateway_from_networkctl(self): try: - subprocess.check_output(["ping", "-q", "-c", "1", default], - stderr=subprocess.STDOUT) + network_info = subprocess.check_output( + [ + "networkctl", + "status", + "--no-pager", + "--no-legend", + self.interface, + ], + universal_newlines=True, + ) except subprocess.CalledProcessError: - pass - # Try to get the gateway address for the interface from networkctl - cmd = 'networkctl status --no-pager --no-legend {}'.format(interface) - try: - output = subprocess.check_output(cmd, shell=True) - for line in output.decode(sys.stdout.encoding).splitlines(): - vals = 
line.strip().split(' ') - if len(vals) >= 2: - if vals[0] == 'Gateway:': - subprocess.check_output(["ping", "-q", "-c", "1", vals[1]], - stderr=subprocess.STDOUT) - break - except subprocess.CalledProcessError: - pass - ARP_POPULATE_TRIES = 10 - num_tries = 0 - while num_tries < ARP_POPULATE_TRIES: + return None + for line in network_info.splitlines(): + line = line.strip() + if line.startswith("Gateway:"): + return line.split()[-1] + return None + + def get_default_gateways(self) -> set: + """ + Use multiple sources to get the default gateway to be robust to + possible platform bugs + """ + def_gateways = { + self._get_default_gateway_from_ip(), + self._get_default_gateway_from_proc(), + self._get_default_gateway_from_bin_route(), + self._get_default_gateway_from_networkctl(), + } + def_gateways -= {None} + if len(def_gateways) > 1: + logging.warning( + "Found more than one default gateway for interface {}".format( + self.interface + ) + ) + return def_gateways + + @classmethod + def get_interface_from_ip(cls, ip): + # Note: this uses -o instead of -j for xenial/bionic compatibility + route_info = subprocess.check_output( + ["ip", "-o", "route", "get", ip], universal_newlines=True + ) + for line in route_info.splitlines(): + # ip dev device_name src ... 
+ fields = line.split() + if len(fields) > 3: + return fields[2] + raise ValueError( + "Unable to determine any device used for {}".format(ip) + ) + + @classmethod + def get_any_interface(cls): + # Note: this uses -o instead of -j for xenial/bionic compatibility + route_infos = subprocess.check_output( + ["ip", "-o", "route", "show", "default", "0.0.0.0/0"], + universal_newlines=True, + ) + for route_info in route_infos.splitlines(): + route_info_fields = route_info.split() + if len(route_info_fields) > 5: + return route_info_fields[4] + raise ValueError("Unable to determine any valid interface") + + @classmethod + def from_ip(cls, ip: str): + """ + Build an instance of Route given an ip, if no ip is provided the best + interface that can route to 0.0.0.0/0 is selected (as described by + metric) + """ + if ip: + interface = Route.get_interface_from_ip(ip) + return Route(interface) + return Route(Route.get_any_interface()) + + +def is_reachable(ip, interface, verbose=False): + """ + Ping an ip to see if it is reachable + """ + result = ping(ip, interface, 3, 10, verbose=verbose) + return result["transmitted"] >= result["received"] > 0 + + +def get_default_gateway_reachable_on(interface: str) -> str: + """ + Returns the default gateway of an interface if it is reachable + """ + if not interface: + raise ValueError("Unable to ping on interface None") + route = Route(interface=interface) + desired_targets = route.get_default_gateways() + for desired_target in desired_targets: + if is_reachable(desired_target, interface): + return desired_target + raise ValueError( + "Unable to reach any estimated gateway of interface {}".format( + interface + ), + ) + + +def get_any_host_reachable_on(interface: str, verbose=False) -> str: + """ + Returns any host that it can reach from a given interface + """ + if not interface: + raise ValueError("Unable to ping on interface None") + route = Route(interface=interface) + broadcast = route.get_broadcast() + arp_parser_re = re.compile( + 
r"\? \((?P[\d.]+)\) at (?P[a-f0-9\:]+) " + r"\[ether\] on (?P[\w\d]+)" + ) + # retry a few times to get something in the arp table + for i in range(10): + ping(broadcast, interface, 1, 1, broadcast=True, verbose=verbose) # Get output from arp -a -n to get known IPs - known_ips = subprocess.check_output(["arp", "-a", "-n"]) - reg = re.compile(r'\? \((?P[\d.]+)\) at (?P[a-f0-9\:]+) ' - r'\[ether\] on (?P[\w\d]+)') - # Filter (if needed) IPs not on the specified interface - pingable_ips = [pingable[0] for pingable in reg.findall( - known_ips.decode()) if not interface or - pingable[2] == interface] - # If the default given ip is among the remaining ones, - # ping that. - if default and default in pingable_ips: - if verbose: - print(_( - "Desired ip address {0} is reachable, using it" - ).format(default)) - return default - # If not, choose another IP. - address_to_ping = pingable_ips[0] if len(pingable_ips) else None - if verbose: - print(_( - "Desired ip address {0} is not reachable from {1}," - " using {2} instead" - ).format(default, interface, address_to_ping)) - if address_to_ping: - return address_to_ping - time.sleep(2) - num_tries += 1 - # Wait time expired + arp_table = subprocess.check_output( + ["arp", "-a", "-n"], universal_newlines=True + ) + hosts_in_arp_table = [ + arp_entry.group("ip") # ip + for arp_entry in arp_parser_re.finditer(arp_table) + if arp_entry.group("iface") == interface + ] + # we don't know how an ip got in the arp table, lets try to reach them + # and return the first that we can acutally reach + for host in hosts_in_arp_table: + if is_reachable(host, interface): + return host + # we were unable to get any reachable host in the arp table, this may + # be due to a slow network, lets retry in a few seconds + time.sleep(5) + raise ValueError( + "Unable to reach any host on interface {}".format(interface) + ) + + +def get_host_to_ping( + interface: str, target: str = None, verbose=False +) -> "str|None": + """ + Attempts to determine a 
reachable host to ping on the specified network + interface. First it tries to ping the provided target. If no target is + specified or the target is not reachable, it then attempts to find a + reachable host by trying the default gateway and finally falling back on + any host on the network interface. + + @returns: The reachable host if any, else None + """ + # Try to use the provided target if it is reachable + if target and is_reachable(target, interface): + return target + # From here onward, lets try to estimate a reachable target + if not interface: + route = Route.from_ip(target) + interface = route.interface + + # Try first with any default gateway that we can gather on the interface + with suppress(ValueError): + return get_default_gateway_reachable_on(interface) + + # Try with any host we can find reachable on the interface + with suppress(ValueError): + return get_any_host_reachable_on(interface) + + # Unable to estimate any host to reach return None -def ping(host, interface, count, deadline, verbose=False): - command = ["ping", str(host), "-c", str(count), "-w", str(deadline)] +def ping( + host: str, + interface: "str|None", + count: int, + deadline: int, + broadcast=False, + verbose=False, +): + """ + pings an host via an interface count times within the given deadline. + If the interface is None, it will not be used. 
+ If the host is a broadcast host, use the broadcast kwarg + + @returns: on success the stats of the ping "transmitted", "received" and + "pct_loss" + @returns: on failure a dict with a "cause" key, with the failure reason + """ + command = ["ping", str(host), "-c", str(count), "-w", str(deadline)] if interface: command.append("-I{}".format(interface)) + if broadcast: + command.append("-b") reg = re.compile( r"(\d+) packets transmitted, (\d+) received," - r".*([0-9]*\.?[0-9]*.)% packet loss") - ping_summary = {'transmitted': 0, 'received': 0, 'pct_loss': 0} + r".*([0-9]*\.?[0-9]*.)% packet loss" + ) + ping_summary = {"transmitted": 0, "received": 0, "pct_loss": 0} try: output = subprocess.check_output( - command, universal_newlines=True, stderr=subprocess.PIPE) - except OSError as exc: - if exc.errno == errno.ENOENT: - # No ping command present; - # default exception message is informative enough. - print(exc) - else: - raise - except subprocess.CalledProcessError as excp: + command, universal_newlines=True, stderr=subprocess.PIPE + ) + except (OSError, FileNotFoundError) as e: + ping_summary["cause"] = str(e) + return ping_summary + except subprocess.CalledProcessError as e: # Ping returned fail exit code - print(_("ERROR: ping result: {0}").format(excp)) - if excp.stderr: - print(excp.stderr) - if 'SO_BINDTODEVICE' in excp.stderr: - ping_summary['cause'] = ( - "Could not bind to the {} interface.".format(interface)) - else: - if verbose: - print(output) - received = re.findall(reg, output) - if received: - ping_summary = received[0] - ping_summary = { - 'transmitted': int(ping_summary[0]), - 'received': int(ping_summary[1]), - 'pct_loss': int(ping_summary[2])} + # broadcast will always do so + if broadcast: + return + ping_summary[ + "cause" + ] = "Failed with exception: {}\nstdout: {}\nstderr: {}".format( + str(e), e.stdout, e.stderr + ) + return ping_summary + if verbose: + print(output) + try: + received = next(re.finditer(reg, output)) + ping_summary = { + 
"transmitted": int(received.group(1)), + "received": int(received.group(2)), + "pct_loss": int(received.group(3)), + } + except StopIteration: + ping_summary[ + "cause" + ] = "Failed to parse the stats from the ping output. Log: {}".format( + output + ) return ping_summary def parse_args(argv): default_count = 2 default_delay = 4 - route = Route() parser = argparse.ArgumentParser() parser.add_argument( "host", nargs="?", - default=route.get_default_gateway(), + default=None, help=_("host to ping"), ) parser.add_argument( @@ -305,7 +476,7 @@ def main(argv) -> int: # If given host is not pingable, override with something pingable. host = get_host_to_ping( - interface=args.interface, verbose=args.verbose, default=args.host + interface=args.interface, verbose=args.verbose, target=args.host ) if args.verbose: print(_("Checking connectivity to {0}").format(host)) diff --git a/providers/base/tests/test_gateway_ping_test.py b/providers/base/tests/test_gateway_ping_test.py index 4a2a253a4a..ecebc6a2b9 100644 --- a/providers/base/tests/test_gateway_ping_test.py +++ b/providers/base/tests/test_gateway_ping_test.py @@ -1,6 +1,501 @@ import unittest -from unittest.mock import patch -from gateway_ping_test import main, parse_args +import textwrap +import subprocess +from unittest.mock import patch, MagicMock, mock_open +from gateway_ping_test import ( + main, + parse_args, + ping, + Route, + is_reachable, + get_default_gateway_reachable_on, + get_any_host_reachable_on, + get_host_to_ping, +) + + +class TestRoute(unittest.TestCase): + @patch("subprocess.check_output") + def test__get_default_gateway_from_ip_nominal(self, mock_check_output): + ok_output = ( + "default via 192.168.1.1 proto dhcp src 192.168.1.119 metric 100" + ) + mock_check_output.return_value = ok_output + expected_gateway = "192.168.1.1" + self_mock = MagicMock() + self_mock.interface = "eth0" + self.assertEqual( + Route._get_default_gateway_from_ip(self_mock), expected_gateway + ) + + 
@patch("subprocess.check_output") + def test__get_default_gateway_from_ip_noroute(self, mock_check_output): + mock_check_output.return_value = "" + self_mock = MagicMock() + self_mock.interface = "eth0" + self.assertIsNone(Route._get_default_gateway_from_ip(self_mock)) + + @patch("subprocess.check_output") + def test__get_default_gateway_from_ip_invalid_route( + self, mock_check_output + ): + invalid_output = "invalid routing information" + mock_check_output.return_value = invalid_output + self_mock = MagicMock() + self_mock.interface = "eth0" + self.assertIsNone(Route._get_default_gateway_from_ip(self_mock)) + + @patch("subprocess.check_output") + def test__get_default_gateway_from_ip_crash( + self, mock_check_output + ): + mock_check_output.side_effect = subprocess.CalledProcessError(1, "") + self_mock = MagicMock() + self_mock.interface = "eth0" + self.assertIsNone(Route._get_default_gateway_from_ip(self_mock)) + + def test__get_default_gateway_from_proc_nominal(self): + self_mock = MagicMock() + + def _num_to_dotted_quad(x): + return Route._num_to_dotted_quad(None, x) + + self_mock._num_to_dotted_quad = _num_to_dotted_quad + self_mock.interface = "eth0" + expected_gateway = "192.168.1.1" + output_sample = textwrap.dedent( + """ + Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT + eth0 00000000 0101A8C0 0003 0 0 0 00000000 0 0 0 + """ + ) + with patch( + "builtins.open", new_callable=mock_open, read_data=output_sample + ): + self.assertEqual( + Route._get_default_gateway_from_proc(self_mock), + expected_gateway, + ) + + def test__get_default_gateway_from_proc_file_error(self): + self_mock = MagicMock() + + def _num_to_dotted_quad(x): + return Route._num_to_dotted_quad(None, x) + + self_mock._num_to_dotted_quad = _num_to_dotted_quad + self_mock.interface = "eth0" + with patch( + "builtins.open", side_effect=FileNotFoundError("File not found") + ): + self.assertIsNone( + Route._get_default_gateway_from_proc(self_mock), + ) + + 
@patch("subprocess.check_output") + def test__get_default_gateway_from_bin_route_nominal( + self, mock_check_output + ): + mock_check_output.return_value = textwrap.dedent( + """ + Kernel IP routing table + Destination Gateway Genmask Flags Metric Ref Use Iface + 0.0.0.0 192.168.1.1 0.0.0.0 UG 100 0 0 enp5s0 + 0.0.0.0 192.168.1.100 0.0.0.0 UG 600 0 0 wlan0 + """ + ) + self_mock = MagicMock() + self_mock.interface = "wlan0" + gateway = Route._get_default_gateway_from_bin_route(self_mock) + self.assertEqual(gateway, "192.168.1.100") + + @patch("subprocess.check_output") + def test__get_default_gateway_from_bin_route_if_not_found( + self, mock_check_output + ): + mock_check_output.return_value = textwrap.dedent( + """ + Kernel IP routing table + Destination Gateway Genmask Flags Metric Ref Use Iface + 0.0.0.0 192.168.1.1 0.0.0.0 UG 100 0 0 enp5s0 + 0.0.0.0 192.168.1.100 0.0.0.0 UG 600 0 0 wlan0 + """ + ) + self_mock = MagicMock() + self_mock.interface = "enp1s0" + gateway = Route._get_default_gateway_from_bin_route(self_mock) + self.assertIsNone(gateway) + + def test_get_broadcast(self): + self_mock = MagicMock() + self_mock.interface = "enp5s0" + self_mock._get_ip_addr_info.return_value = textwrap.dedent( + """ + 1: lo inet 127.0.0.1/8 scope host lo\\ valid_lft forever ... + 3: wlan0 inet 192.168.1.115/24 brd 192.168.1.255 scope ... + 4: enp5s0 inet 192.168.1.119/24 brd 192.168.1.255 scope ... + """ + ) + self.assertEqual(Route.get_broadcast(self_mock), "192.168.1.255") + + def test_get_broadcast_not_found(self): + self_mock = MagicMock() + self_mock.interface = "eth0" + self_mock._get_ip_addr_info.return_value = textwrap.dedent( + """ + 1: lo inet 127.0.0.1/8 scope host lo\\ valid_lft forever ... + 3: wlan0 inet 192.168.1.115/24 brd 192.168.1.255 scope ... + 4: enp5s0 inet 192.168.1.119/24 brd 192.168.1.255 scope ... 
+ """ + ) + with self.assertRaises(ValueError): + Route.get_broadcast(self_mock) + + @patch("subprocess.check_output") + def test__get_default_gateway_from_networkctl_ok(self, mock_check_output): + mock_check_output.return_value = textwrap.dedent( + """ + systemd-networkd is not running, output might be incomplete. + Failed to query link bit rates: Could not activate remote peer: + activation request failed: unknown unit. + Failed to query link DHCP leases: Could not activate remote peer: + activation request failed: unknown unit. + + ● 4: enp5s0 + Link File: /usr/lib/systemd/network/99-default.link + Address: 192.168.1.119 + fe80::eb41:84fa:6da5:c815 + Gateway: 192.168.1.1 + + feb 00 00:00:00 ... systemd-resolved[1430]: enp5s0: Systemd log line + """ + ) + def_gateway = Route("enp5s0")._get_default_gateway_from_networkctl() + self.assertEqual(def_gateway, "192.168.1.1") + + @patch("subprocess.check_output") + def test__get_default_gateway_from_networkctl_no_gateway( + self, mock_check_output + ): + mock_check_output.return_value = textwrap.dedent( + """ + systemd-networkd is not running, output might be incomplete. + Failed to query link bit rates: Could not activate remote peer: + activation request failed: unknown unit. + Failed to query link DHCP leases: Could not activate remote peer: + activation request failed: unknown unit. + + ● 4: enp5s0 + Link File: /usr/lib/systemd/network/99-default.link + Address: 192.168.1.119 + fe80::eb41:84fa:6da5:c815 + + feb 00 00:00:00 ... 
systemd-resolved[1430]: enp5s0: Systemd log line + """ + ) + def_gateway = Route("enp5s0")._get_default_gateway_from_networkctl() + self.assertIsNone(def_gateway) + + @patch("subprocess.check_output") + def test__get_default_gateway_from_networkctl_failure( + self, mock_check_output + ): + mock_check_output.side_effect = subprocess.CalledProcessError(1, "") + def_gateway = Route("enp5s0")._get_default_gateway_from_networkctl() + self.assertIsNone(def_gateway) + + @patch("subprocess.check_output") + def test__get_default_gateway_from_bin_route_exception( + self, mock_check_output + ): + mock_check_output.side_effect = subprocess.CalledProcessError(1, "") + self_mock = MagicMock() + self_mock.interface = "enp1s0" + gateway = Route._get_default_gateway_from_bin_route(self_mock) + self.assertIsNone(gateway) + + def test_get_default_gateways(self): + self_mock = MagicMock() + self_mock._get_default_gateway_from_ip.return_value = "192.168.1.1" + self_mock._get_default_gateway_from_proc.return_value = "192.168.1.1" + self_mock._get_default_gateway_from_bin_route.return_value = None + self_mock._get_default_gateway_from_networkctl.return_value = None + + self.assertEqual( + Route.get_default_gateways(self_mock), {"192.168.1.1"} + ) + + @patch("logging.warning") + def test_get_default_gateways_warns(self, mock_warn): + self_mock = MagicMock() + self_mock._get_default_gateway_from_ip.return_value = None + self_mock._get_default_gateway_from_proc.return_value = None + self_mock._get_default_gateway_from_bin_route.return_value = ( + "192.168.1.1" + ) + self_mock._get_default_gateway_from_networkctl.return_value = ( + "192.168.1.2" + ) + + self.assertEqual( + Route.get_default_gateways(self_mock), + {"192.168.1.1", "192.168.1.2"}, + ) + self.assertTrue(mock_warn.called) + + @patch( + "subprocess.check_output", + return_value="192.168.1.203 dev enp5s0 src 192.168.1.119 uid 1000", + ) + def test_get_interface_from_ip_ok(self, mock_check_output): + self.assertEqual( + 
Route.get_interface_from_ip("192.168.1.203"), "enp5s0" + ) + + @patch("subprocess.check_output", return_value="") + def test_get_interface_from_ip_no_route(self, mock_check_output): + with self.assertRaises(ValueError): + Route.get_interface_from_ip("192.168.1.203") + + @patch("subprocess.check_output") + def test_get_any_interface_from_ip_ok(self, mock_check_output): + mock_check_output.return_value = textwrap.dedent( + """ + default via 192.168.1.1 dev enp5s0 proto dhcp src 192.168.1.119 metric 100 + default via 192.168.1.1 dev wlan0 proto dhcp src 192.168.1.115 metric 600 + """ + ) + self.assertEqual(Route.get_any_interface(), "enp5s0") + + @patch("subprocess.check_output", return_value="") + def test_get_any_interface_from_ip_not_found(self, mock_check_output): + with self.assertRaises(ValueError): + Route.get_any_interface() + + @patch("gateway_ping_test.Route.get_interface_from_ip") + def test_from_ip(self, mock_get_interface_from_ip): + mock_get_interface_from_ip.return_value = "enp6s0" + self.assertEqual(Route.from_ip("192.168.3.3").interface, "enp6s0") + self.assertTrue(mock_get_interface_from_ip.called) + + @patch("gateway_ping_test.Route.get_any_interface") + def test_from_ip_none(self, mock_get_interface_from_ip): + mock_get_interface_from_ip.return_value = "enp6s0" + self.assertEqual(Route.from_ip(None).interface, "enp6s0") + self.assertTrue(mock_get_interface_from_ip.called) + + +class TestUtilityFunctions(unittest.TestCase): + @patch("gateway_ping_test.ping") + def test_is_reachable(self, mock_ping): + mock_ping.return_value = {"transmitted": 3, "received": 2} + self.assertTrue(is_reachable("10.0.0.1", "eth0")) + + @patch("gateway_ping_test.ping") + def test_is_reachable_false(self, mock_ping): + mock_ping.return_value = {"transmitted": 0, "received": 0} + self.assertFalse(is_reachable("10.0.0.1", "eth0")) + + +class TestReachabilityFunctions(unittest.TestCase): + @patch("gateway_ping_test.Route") + @patch("gateway_ping_test.is_reachable", 
return_value=True) + def test_get_default_gateway_reachable_on_gateway_reachable( + self, mock_is_reachable, mock_route + ): + mock_route.return_value.get_default_gateways.return_value = [ + "192.168.1.1" + ] + interface = "eth0" + result = get_default_gateway_reachable_on(interface) + self.assertEqual(result, "192.168.1.1") + mock_route.assert_called_once_with(interface=interface) + mock_is_reachable.assert_called_once_with("192.168.1.1", interface) + + def test_get_default_gateway_reachable_on_interface_none(self): + interface = None + with self.assertRaises(ValueError) as context: + get_default_gateway_reachable_on(interface) + self.assertTrue( + "Unable to ping on interface None" in str(context.exception) + ) + + @patch("gateway_ping_test.Route") + @patch("gateway_ping_test.is_reachable", return_value=False) + def test_get_default_gateway_reachable_on_no_reachable_gateway( + self, mock_is_reachable, mock_route + ): + mock_route.return_value.get_default_gateways.return_value = [ + "192.168.1.1", + "192.168.1.2", + ] + interface = "eth0" + with self.assertRaises(ValueError) as context: + get_default_gateway_reachable_on(interface) + self.assertTrue( + "Unable to reach any estimated gateway of interface eth0" + in str(context.exception) + ) + mock_route.assert_called_once_with(interface=interface) + self.assertEqual( + mock_is_reachable.call_count, len(["192.168.1.1", "192.168.1.2"]) + ) + + @patch("gateway_ping_test.subprocess.check_output") + @patch("gateway_ping_test.ping") + @patch("gateway_ping_test.Route") + @patch("gateway_ping_test.is_reachable", return_value=True) + def test_get_any_host_reachable_on_host_reachable( + self, mock_is_reachable, mock_route, mock_ping, mock_subprocess_output + ): + mock_route.return_value.get_broadcast.return_value = "192.168.1.255" + mock_subprocess_output.return_value = ( + "? 
(192.168.1.100) at ab:cd:ef:12:34:56 [ether] on eth0\n" + ) + interface = "eth0" + expected_host = "192.168.1.100" + result = get_any_host_reachable_on(interface) + self.assertEqual(result, expected_host) + + def test_get_any_host_reachable_on_interface_none(self): + interface = None + with self.assertRaises(ValueError) as context: + get_any_host_reachable_on(interface) + self.assertTrue( + "Unable to ping on interface None" in str(context.exception) + ) + + @patch("gateway_ping_test.subprocess.check_output") + @patch("gateway_ping_test.ping") + @patch("gateway_ping_test.Route") + @patch("gateway_ping_test.is_reachable", return_value=False) + @patch("gateway_ping_test.time.sleep") # Mock sleep to speed up the test + def test_get_any_host_reachable_on_no_reachable_host( + self, + mock_sleep, + mock_is_reachable, + mock_route, + mock_ping, + mock_subprocess_output, + ): + mock_route.return_value.get_broadcast.return_value = "192.168.1.255" + mock_subprocess_output.return_value = ( + "? (192.168.1.100) at ab:cd:ef:12:34:56 [ether] on eth0\n" + ) + interface = "eth0" + with self.assertRaises(ValueError) as context: + get_any_host_reachable_on(interface) + self.assertTrue( + "Unable to reach any host on interface eth0" + in str(context.exception) + ) + + @patch("gateway_ping_test.is_reachable", return_value=True) + def test_get_host_to_ping_priority_target(self, _): + self.assertEqual(get_host_to_ping("eth0", "10.0.0.1"), "10.0.0.1") + + @patch("gateway_ping_test.is_reachable", return_value=False) + @patch( + "gateway_ping_test.get_default_gateway_reachable_on", + return_value="10.0.0.20", + ) + @patch("gateway_ping_test.Route") + def test_get_host_to_ping_priority_default_gateway( + self, mock_route, mock_default_gateway_rechable_on, _ + ): + # default gateway is on the same interface as the route target + # 10.0.0.1 but 10.0.0.1 is not reachable + self.assertEqual(get_host_to_ping(None, "10.0.0.1"), "10.0.0.20") + + @patch("gateway_ping_test.is_reachable", 
return_value=False)
+    @patch(
+        "gateway_ping_test.get_any_host_reachable_on", return_value="10.0.1.2"
+    )
+    @patch(
+        "gateway_ping_test.get_default_gateway_reachable_on",
+        side_effect=ValueError,
+    )
+    @patch("gateway_ping_test.Route")
+    def test_get_host_to_ping_priority_any_route(
+        self,
+        mock_route,
+        mock_default_gateway_rechable_on,
+        mock_get_any_host_reachable_on,
+        _,
+    ):
+        # default gateway is on the same interface as the route target
+        # but both are unreachable, the test should try to get any reachable
+        # targets on the interface
+        self.assertEqual(get_host_to_ping(None, "10.0.0.1"), "10.0.1.2")
+
+    @patch("gateway_ping_test.is_reachable", return_value=False)
+    @patch(
+        "gateway_ping_test.get_any_host_reachable_on", side_effect=ValueError
+    )
+    @patch(
+        "gateway_ping_test.get_default_gateway_reachable_on",
+        side_effect=ValueError,
+    )
+    @patch("gateway_ping_test.Route")
+    def test_get_host_to_ping_priority_failure(
+        self,
+        mock_route,
+        mock_default_gateway_rechable_on,
+        mock_get_any_host_reachable_on,
+        _,
+    ):
+        # we are unable to reach any target on the interface that should
+        # reach 10.0.0.1
+        self.assertIsNone(get_host_to_ping(None, "10.0.0.1"))
+
+
+class TestPingFunction(unittest.TestCase):
+    @patch("subprocess.check_output")
+    def test_ping_ok(self, mock_check_output):
+        mock_check_output.return_value = (
+            "4 packets transmitted, 4 received, 0% packet loss"
+        )
+        result = ping("8.8.8.8", "eth0", 4, 5, verbose=True)
+        self.assertEqual(result["transmitted"], 4)
+        self.assertEqual(result["received"], 4)
+        self.assertEqual(result["pct_loss"], 0)
+
+    @patch("subprocess.check_output")
+    def test_ping_malformed_output(self, mock_check_output):
+        mock_check_output.return_value = "Malformed output"
+        result = ping("8.8.8.8", "eth0", 4, 5, verbose=True)
+        self.assertIn("Failed to parse", result["cause"])
+
+    @patch("subprocess.check_output")
+    def test_ping_no_ping(self, mock_check_output):
+        mock_check_output.side_effect = 
FileNotFoundError("ping not found") + result = ping("8.8.8.8", "eth0", 4, 5, verbose=True) + self.assertEqual(result["cause"], str(mock_check_output.side_effect)) + + @patch("subprocess.check_output") + def test_ping_failure(self, mock_check_output): + mock_check_output.side_effect = MagicMock( + side_effect=subprocess.CalledProcessError( + 1, "ping", "ping: unknown host" + ) + ) + result = ping("invalid.host", None, 4, 5) + # Since the function does not return a detailed error for general + # failures, we just check for non-success + self.assertNotEqual( + result["received"], 4 + ) # Assuming failure means not all packets are received + + @patch("subprocess.check_output") + def test_ping_failure_broadcast(self, mock_check_output): + # Simulate broadcast ping which always fails + mock_check_output.side_effect = MagicMock( + side_effect=subprocess.CalledProcessError( + 1, "ping", stderr="SO_BINDTODEVICE: Operation not permitted" + ) + ) + result = ping("255.255.255.255", None, 4, 5, broadcast=True) + self.assertIsNone(result) class TestMainFunction(unittest.TestCase): From d070c640bccf664c0882583c1dc0d3a0d1fdb7b8 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Wed, 7 Feb 2024 17:37:02 +0100 Subject: [PATCH 022/108] Explicitly use verbose kwarg (bugfix) (#983) Explicitly use verbose kwarg --- providers/base/bin/wifi_client_test_netplan.py | 2 +- providers/base/bin/wifi_nmcli_test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/providers/base/bin/wifi_client_test_netplan.py b/providers/base/bin/wifi_client_test_netplan.py index d984e2e54a..f314480454 100755 --- a/providers/base/bin/wifi_client_test_netplan.py +++ b/providers/base/bin/wifi_client_test_netplan.py @@ -256,7 +256,7 @@ def perform_ping_test(interface): if target: count = 5 - result = ping(target, interface, count, 10, True) + result = ping(target, interface, count, 10, verbose=True) if result['received'] == count: return True diff --git a/providers/base/bin/wifi_nmcli_test.py 
b/providers/base/bin/wifi_nmcli_test.py index be1ad2943e..91000bbafe 100755 --- a/providers/base/bin/wifi_nmcli_test.py +++ b/providers/base/bin/wifi_nmcli_test.py @@ -119,7 +119,7 @@ def perform_ping_test(interface): if target: count = 5 - result = ping(target, interface, count, 10, True) + result = ping(target, interface, count, 10, verbose=True) if result['received'] == count: return True From 850cde3a9376c01f940fb6863f56c51821d312b0 Mon Sep 17 00:00:00 2001 From: mreed8855 Date: Fri, 9 Feb 2024 11:11:45 -0600 Subject: [PATCH 023/108] Update the Virtualization Script for 24.04 (BugFix) (#968) Updated the canonical-certification.conf for noble Updated the precheck script to remove the UVT_KVM test Updated the script to call the KVM check image test --- .../configs/canonical-certification.conf | 6 +-- .../tools/canonical-certification-precheck | 39 +------------------ 2 files changed, 4 insertions(+), 41 deletions(-) diff --git a/providers/certification-server/configs/canonical-certification.conf b/providers/certification-server/configs/canonical-certification.conf index a6bb620b01..7c7c159b04 100644 --- a/providers/certification-server/configs/canonical-certification.conf +++ b/providers/certification-server/configs/canonical-certification.conf @@ -48,7 +48,7 @@ DISK_MDADM_READ_PERF = 150 # KVM_IMAGE = http://cloud-images.ubuntu.com/daily/server/daily/server/{release} # # Example: -# KVM_IMAGE = file:///home/ubuntu/jammy-server-cloudimg-amd64.img +# KVM_IMAGE = file:///home/ubuntu/noble-server-cloudimg-amd64.img # # Note that a MAAS server set up via the MANIACS guide may optionally house # copies of the KVM images under http://{ip-address}/cloud/. @@ -79,8 +79,8 @@ DISK_MDADM_READ_PERF = 150 # UNCOMMENT BOTH OF THE FOLLOWING 2 LINES nad set the proper URLs as necessary # OR leave them commented out and the LXD test will obtain the needed images # from cloud-images.ubuntu.com. 
-#LXD_ROOTFS = http://server_url/path/to/jammy-server-cloudimg-amd64.squashfs -#LXD_TEMPLATE = http://server_url/path/to/jammy-server-cloudimg-amd64-lxd.tar.xz +#LXD_ROOTFS = http://server_url/path/to/noble-server-cloudimg-amd64.squashfs +#LXD_TEMPLATE = http://server_url/path/to/noble-server-cloudimg-amd64-lxd.tar.xz # The STRESS_NG_MIN_SWAP_SIZE environment variable sets the minimum swap # space, in gibibytes (GiB), that the stress/memory_stress_ng test requires. diff --git a/providers/certification-server/tools/canonical-certification-precheck b/providers/certification-server/tools/canonical-certification-precheck index cd5b70a698..8eefac691e 100755 --- a/providers/certification-server/tools/canonical-certification-precheck +++ b/providers/certification-server/tools/canonical-certification-precheck @@ -20,7 +20,7 @@ info(){ eval pass${i}=3 } -steps="Fix_APT_Issues Verify_Config_File Ubuntu_Version Arch EFI_Mode CCS_Version SID_Check Installed_Ram Virtualization_Support NICs_enabled Jumbo_Frames IPERF Network_Subnets LVM_Check Hard_Disks USB_Disks Disk_Speed_Check UVT_KVM_Image_Check LXD_Image_Check XDG_Check CPUFreq_Check GPGPU_Check NVDIMM_Detection" +steps="Fix_APT_Issues Verify_Config_File Ubuntu_Version Arch EFI_Mode CCS_Version SID_Check Installed_Ram Virtualization_Support NICs_enabled Jumbo_Frames IPERF Network_Subnets LVM_Check Hard_Disks USB_Disks Disk_Speed_Check KVM_Image_Check LXD_Image_Check XDG_Check CPUFreq_Check GPGPU_Check NVDIMM_Detection" while getopts "i" opt; do case $opt in @@ -388,43 +388,6 @@ else fi } -# Check local availability of UVT KVM image or source -UVT_KVM_Image_Check(){ -echoname "UVT_KVM Image Check" -if grep "^UVT_IMAGE_OR_SOURCE =" $configfile >/dev/null; then - kvmurl=$(grep ^UVT_IMAGE_OR_SOURCE $configfile|awk '{print $3}') - echo $kvmurl - if [[ "$kvmurl" =~ .*\.img$ ]] ; then - /usr/bin/qemu-img check $kvmurl >/dev/null 2>&1 - if [ $? 
= "0" ]; then - echo -e " $kvmurl is a valid image.\n Continuing to use this image" - pass - return 0 - else - echo -e "$kvmurl is an invalid image. \n" - fail - return 0 - fi - elif [[ "$kvmurl" =~ ^http ]] ; then - if curl --output /dev/null --silent --head -fail "$kvmurl"; then - echo -e " This is a valid source $kvmurl.\n Continuing to use this source" - pass - else - echo -e " This is not a valid source $kvmurl.\n Marking as failed" - fail - fi - else - echo -e " This is not a valid image or source" - echo -e " Leaving configuration but commenting out.\n Trying cloud image." - sudo sed -i '/^UVT_IMAGE_OR_SOURCE/ s/^#*/#/' $configfile - fail - fi -else - echo -e " UVT_IMAGE_OR_SOURCE is not configured. \n uvt-simplestreams-libvirt will download images from cloud-images.ubuntu by default" - fail -fi -} - # Check local availability of KVM image or pull from Internet, which tends to be slow KVM_Image_Check(){ echoname "KVM Image Check" From 3e6c0e1ec07efa7e1a7ede8982bfb86453316e2b Mon Sep 17 00:00:00 2001 From: Pierre Equoy Date: Mon, 12 Feb 2024 15:49:05 +0800 Subject: [PATCH 024/108] Add uniqueness for Template Units id (BugFix) (#951) * Make sure Template units are units with id A Template unit always contains an id field. It should therefore inherit UnitWithId instead of Unit, so that the `partial_id()` and `id()` methods are inherited as well. * Prevent Template Units id from being rendered when using Jinja2 By default, templates using Jinja2 engine gets their id rendered as soon as they get accessed, but since they don't have the appropriate environment, the parameters are replaced with an empty string. 
This is why when you run the command `checkbox-cli list template`, you may see things like this: template 'com.canonical.certification::camera/still_' instead of template 'com.canonical.certification::camera/still_{{ name }}' Note that this does not happen with standard Python formatted strings: template 'com.canonical.certification::camera/led_{name}' In order to make the behavior consistent regardless of the template engine used, this commit: - introduces a unit property that always returns "template" (similar to how the Job unit always returns "job"). - modifies Unit.get_record_value() to prevent rendering using Jinja2 templating if the unit is a Checkbox Template It also contains unit tests for both the TemplateUnit and its Validator. * Have Template Unit's "template-id" field generated from the "id" field, if absent * Expose Template Unit's template-id field instead of id PlainBoxObject is used when exposing Units at a high level; for instance, when running `checkbox-cli list template`. Instead of exposing the Template Unit's id field, expose the template-id field. * Use Template partial_id and id, similar to Job's partial_id and id In order to have more granularity on what is displayed for the template id, use methods similar to the Job Unit. * Add unit test to ensure templates have a unique id * Implement explain() in TemplateUnit This method overrides UnitWithId.explain() which displays the Unit's id. In the case of a Template, this is misleading, and the template_id (or, in this case, template_partial_id to prevent displaying the provider namespace) should be used instead. This is useful when reporting errors using `./manage.py validate`, for instance. 
* Update Template Unit documentation about template-id field * Cleanup Template Unit reference documentation --- checkbox-ng/plainbox/impl/highlevel.py | 4 +- checkbox-ng/plainbox/impl/test_highlevel.py | 40 +++++++ checkbox-ng/plainbox/impl/unit/template.py | 97 ++++++++++++--- .../plainbox/impl/unit/test_template.py | 113 ++++++++++++++++++ checkbox-ng/plainbox/impl/unit/test_unit.py | 43 +++++++ checkbox-ng/plainbox/impl/unit/unit.py | 2 + docs/reference/units/template.rst | 59 +++++---- 7 files changed, 319 insertions(+), 39 deletions(-) create mode 100644 checkbox-ng/plainbox/impl/test_highlevel.py diff --git a/checkbox-ng/plainbox/impl/highlevel.py b/checkbox-ng/plainbox/impl/highlevel.py index 6d5f8000bb..8991b2bf38 100644 --- a/checkbox-ng/plainbox/impl/highlevel.py +++ b/checkbox-ng/plainbox/impl/highlevel.py @@ -293,9 +293,11 @@ def _file_to_obj(self, unit): def _template_to_obj(self, unit): return PlainBoxObject( - unit, group=unit.Meta.name, name=unit.id, attrs=OrderedDict(( + unit, group=unit.Meta.name, name=unit.template_id, + attrs=OrderedDict(( ('id', unit.id), ('partial_id', unit.partial_id), + ('template_id', unit.template_id), ('template_unit', unit.template_unit), ('template_resource', unit.template_resource), ('template_filter', unit.template_filter), diff --git a/checkbox-ng/plainbox/impl/test_highlevel.py b/checkbox-ng/plainbox/impl/test_highlevel.py new file mode 100644 index 0000000000..619272d9c9 --- /dev/null +++ b/checkbox-ng/plainbox/impl/test_highlevel.py @@ -0,0 +1,40 @@ +# This file is part of Checkbox. +# +# Copyright 2024 Canonical Ltd. +# Written by: +# Pierre Equoy +# +# Checkbox is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, +# as published by the Free Software Foundation. 
+# +# Checkbox is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Checkbox. If not, see . + +from unittest import TestCase + +from plainbox.impl.highlevel import Explorer +from plainbox.impl.unit.template import TemplateUnit + + +class TestExplorer(TestCase): + def test_template_to_obj__without_template_id(self): + template = TemplateUnit({ + "id": "id", + }) + explorer = Explorer() + obj = explorer._template_to_obj(template) + self.assertEqual(obj.name, "id") + + def test_template_to_obj__with_template_id(self): + template = TemplateUnit({ + "template-id": "template-id", + }) + explorer = Explorer() + obj = explorer._template_to_obj(template) + self.assertEqual(obj.name, "template-id") diff --git a/checkbox-ng/plainbox/impl/unit/template.py b/checkbox-ng/plainbox/impl/unit/template.py index c78119dd0b..3df94fc07e 100644 --- a/checkbox-ng/plainbox/impl/unit/template.py +++ b/checkbox-ng/plainbox/impl/unit/template.py @@ -23,6 +23,7 @@ """ import itertools import logging +import string from plainbox.i18n import gettext as _ from plainbox.i18n import gettext_noop as N_ @@ -35,13 +36,13 @@ from plainbox.impl.unit import all_units from plainbox.impl.unit import concrete_validators from plainbox.impl.unit import get_accessed_parameters -from plainbox.impl.unit.unit import Unit -from plainbox.impl.unit.unit import UnitValidator +from plainbox.impl.unit.unit_with_id import UnitWithId +from plainbox.impl.unit.unit_with_id import UnitWithIdValidator from plainbox.impl.unit.validators import CorrectFieldValueValidator from plainbox.impl.unit.validators import ReferenceConstraint from plainbox.impl.unit.validators import UnitReferenceValidator +from plainbox.impl.unit.validators import UniqueValueValidator from 
plainbox.impl.validation import Problem -from plainbox.impl.validation import Severity __all__ = ['TemplateUnit'] @@ -50,7 +51,7 @@ logger = logging.getLogger("plainbox.unit.template") -class TemplateUnitValidator(UnitValidator): +class TemplateUnitValidator(UnitWithIdValidator): """Validator for template unit.""" @@ -77,8 +78,28 @@ def check(self, unit): self.issue_list.append(issue) yield issue + def explain(self, unit, field, kind, message): + """ + Lookup an explanatory string for a given issue kind + + :returns: + A string (explanation) or None if the issue kind + is not known to this method. + + This version overrides the base implementation to use the unit + template_id, if it is available, when reporting issues. + """ + if unit.template_partial_id is None: + return super().explain(unit, field, kind, message) + stock_msg = self._explain_map.get(kind) + if stock_msg is None: + return None + return _("{unit} {id!a}, field {field!a}, {message}").format( + unit=unit.tr_unit(), id=unit.template_partial_id, field=str(field), + message=message or stock_msg) + -class TemplateUnit(Unit): +class TemplateUnit(UnitWithId): """ Template that can instantiate zero or more additional units. @@ -166,21 +187,13 @@ def __str__(self): return "{} <~ {}".format(self.id, self.resource_id) @property - def partial_id(self): + def unit(self): """ - Identifier of this job, without the provider name. 
+ The value of the unit field (overridden) - This field should not be used anymore, except for display + The return value is always "template" """ - return self.get_record_value('id', '?') - - @property - def id(self): - """Identifier of this template unit.""" - if self.provider: - return "{}::{}".format(self.provider.namespace, self.partial_id) - else: - return self.partial_id + return "template" @property def resource_partial_id(self): @@ -216,6 +229,44 @@ def resource_id(self): else: return "{}::{}".format(resource_namespace, resource_partial_id) + @classmethod + def slugify_template_id(cls, _string=None): + """ + Remove unwanted characters from a raw job id string. + + This helps exposing cleaner looking template ids when the id is + generated from the id field by removing characters like '{', '}', + and ' '. + """ + if _string: + valid_chars = frozenset( + "-_.:/\\{}{}".format(string.ascii_letters, string.digits) + ) + return "".join(c if c in valid_chars else "" for c in _string) + + @property + def template_partial_id(self): + """ + Identifier of this template, without the provider namespace. + + If the ``template-id`` field is not present in the unit definition, + ``template_partial_id`` is computed from the ``partial_id`` attribute. 
+ """ + template_partial_id = self.get_record_value("template-id") + if not template_partial_id: + template_partial_id = self.slugify_template_id(self.partial_id) + return template_partial_id + + @property + def template_id(self): + """Identifier of this template, with the provider namespace.""" + if self.provider and self.template_partial_id: + return "{}::{}".format(self.provider.namespace, + self.template_partial_id + ) + else: + return self.template_partial_id + @property def template_resource(self): """value of the 'template-resource' field.""" @@ -436,6 +487,7 @@ class fields(SymbolDef): """Symbols for each field that a TemplateUnit can have.""" + template_id = "template-id" template_unit = 'template-unit' template_resource = 'template-resource' template_filter = 'template-filter' @@ -444,6 +496,17 @@ class fields(SymbolDef): validator_cls = TemplateUnitValidator field_validators = { + fields.template_id: [ + concrete_validators.untranslatable, + concrete_validators.templateVariant, + UniqueValueValidator(), + # We want to have bare, namespace-less identifiers + CorrectFieldValueValidator( + lambda value, unit: ( + "::" not in unit.get_record_value("template-id")), + message=_("identifier cannot define a custom namespace"), + onlyif=lambda unit: unit.get_record_value("template-id")), + ], fields.template_unit: [ concrete_validators.untranslatable, ], diff --git a/checkbox-ng/plainbox/impl/unit/test_template.py b/checkbox-ng/plainbox/impl/unit/test_template.py index 97216f1b68..180ddb2708 100644 --- a/checkbox-ng/plainbox/impl/unit/test_template.py +++ b/checkbox-ng/plainbox/impl/unit/test_template.py @@ -42,6 +42,14 @@ class TemplateUnitTests(TestCase): + def test_id(self): + template = TemplateUnit({ + "template-resource": "resource", + "template-id": "check-devices", + "id": "check-device-{dev_name}", + }) + self.assertEqual(template.id, "check-device-{dev_name}") + def test_resource_partial_id__empty(self): """ Ensure that ``resource_partial_id`` defaults 
to None @@ -167,6 +175,54 @@ def test_resource_id__template_and_provider_ns(self): 'template-resource': 'rc' }, provider=provider).resource_id, 'namespace::rc') + def test_slugify(self): + self.assertEqual( + TemplateUnit.slugify_template_id("stress/benchmark_{disk}"), + "stress/benchmark_disk" + ) + self.assertEqual( + TemplateUnit.slugify_template_id("ns::stress/benchmark_{disk}"), + "ns::stress/benchmark_disk" + ) + self.assertEqual( + TemplateUnit.slugify_template_id("suspend_{{ iterations }}_times"), + "suspend_iterations_times" + ) + self.assertEqual(TemplateUnit.slugify_template_id(), None) + + def test_template_id(self): + self.assertEqual(TemplateUnit({ + "template-id": "template_id", + }).template_id, "template_id") + + def test_template_id__from_job_id(self): + self.assertEqual(TemplateUnit({ + "id": "job_id_{param}", + }).template_id, "job_id_param") + + def test_template_id__precedence(self): + """Ensure template-id takes precedence over job id.""" + self.assertEqual(TemplateUnit({ + "template-id": "template_id", + "id": "job_id_{param}", + }).template_id, "template_id") + + def test_template_id__from_job_id_jinja2(self): + self.assertEqual(TemplateUnit({ + "template-resource": "resource", + "template-engine": "jinja2", + "id": "job_id_{{ param }}", + }).template_id, "job_id_param") + + def test_template_id__precedence_jinja2(self): + """Ensure template-id takes precedence over Jinja2-templated job id.""" + self.assertEqual(TemplateUnit({ + "template-id": "template_id", + "template-resource": "resource", + "template-engine": "jinja2", + "id": "job_id_{{ param }}", + }).template_id, "template_id") + def test_template_resource__empty(self): self.assertEqual(TemplateUnit({}).template_resource, None) @@ -351,6 +407,14 @@ def test_instantiate_all(self): class TemplateUnitJinja2Tests(TestCase): + def test_id_jinja2(self): + template = TemplateUnit({ + 'template-resource': 'resource', + 'template-engine': 'jinja2', + 'id': 'check-device-{{ dev_name }}', + }) 
+ self.assertEqual(template.id, "check-device-{{ dev_name }}") + def test_instantiate_one_jinja2(self): template = TemplateUnit({ 'template-resource': 'resource', @@ -374,6 +438,55 @@ class TemplateUnitFieldValidationTests(UnitFieldValidationTests): unit_cls = TemplateUnit + def test_template_id__untranslatable(self): + issue_list = self.unit_cls({ + '_template-id': 'template_id' + }, provider=self.provider).check() + self.assertIssueFound( + issue_list, self.unit_cls.Meta.fields.template_id, + Problem.unexpected_i18n, Severity.warning) + + def test_template_id__bare(self): + issue_list = self.unit_cls({ + "template-id": "ns::id" + }, provider=self.provider).check() + message = ("template 'ns::id', field 'template-id', identifier cannot " + "define a custom namespace") + self.assertIssueFound( + issue_list, self.unit_cls.Meta.fields.template_id, + Problem.wrong, Severity.error, message) + + def test_template_id__unique(self): + unit = self.unit_cls({ + 'template-id': 'id' + }, provider=self.provider) + other_unit = self.unit_cls({ + 'template-id': 'id' + }, provider=self.provider) + self.provider.unit_list = [unit, other_unit] + self.provider.problem_list = [] + context = UnitValidationContext([self.provider]) + message_start = ( + "{} 'id', field 'template-id', clashes with 1 other unit," + " look at: " + ).format(unit.tr_unit()) + issue_list = unit.check(context=context) + issue = self.assertIssueFound( + issue_list, self.unit_cls.Meta.fields.template_id, + Problem.not_unique, Severity.error) + self.assertTrue(issue.message.startswith(message_start)) + + def test_unit__present(self): + """ + TemplateUnit.unit always returns "template", the default error for the + base Unit class should never happen. 
+ """ + issue_list = self.unit_cls({ + }, provider=self.provider).check() + message = "field 'unit', unit should explicitly define its type" + self.assertIssueNotFound(issue_list, self.unit_cls.Meta.fields.unit, + Problem.missing, Severity.advice, message) + def test_template_unit__untranslatable(self): issue_list = self.unit_cls({ # NOTE: the value must be a valid unit! diff --git a/checkbox-ng/plainbox/impl/unit/test_unit.py b/checkbox-ng/plainbox/impl/unit/test_unit.py index bdc231884b..06f45d2ce5 100644 --- a/checkbox-ng/plainbox/impl/unit/test_unit.py +++ b/checkbox-ng/plainbox/impl/unit/test_unit.py @@ -79,6 +79,49 @@ def assertIssueFound(self, issue_list, field=None, kind=None, '\n'.join(" - {!r}".format(issue) for issue in issue_list)) return self.fail(msg) + def assertIssueNotFound(self, issue_list, field=None, kind=None, + severity=None, message=None): + """ + Raise an assertion if no issue matching the provided criteria is found + + :param issue_list: + A list of issues to look through + :param field: + (optional) value that must match the same attribute on the Issue + :param kind: + (optional) value that must match the same attribute on the Issue + :param severity: + (optional) value that must match the same attribute on the Issue + :param message: + (optional) value that must match the same attribute on the Issue + :returns: + The issue matching those constraints, if found + """ + for issue in issue_list: + if field is not None and issue.field is not field: + continue + if severity is not None and issue.severity is not severity: + continue + if kind is not None and issue.kind is not kind: + continue + if message is not None and issue.message != message: + continue + # return issue + return self.fail("Issue matching the given criteria found!") + msg = "Issue matching:\n{}\nwas found in:\n{}".format( + '\n'.join( + ' * {} is {!r}'.format(issue_attr, value) + for issue_attr, value in + [('field', field), + ('severity', severity), + ('kind', kind), + 
('message', message)] + if value is not None), + '\n'.join(" - {!r}".format(issue) for issue in issue_list)) + return self.fail(msg) + else: + return issue + class TestUnitDefinition(TestCase): diff --git a/checkbox-ng/plainbox/impl/unit/unit.py b/checkbox-ng/plainbox/impl/unit/unit.py index 8cc84946f1..1e16406366 100644 --- a/checkbox-ng/plainbox/impl/unit/unit.py +++ b/checkbox-ng/plainbox/impl/unit/unit.py @@ -713,6 +713,7 @@ def get_record_value(self, name, default=None): value is not None and self.template_engine == "jinja2" and not self.is_parametric + and not self.unit == "template" ): tmp_params = { "__checkbox_env__": self._checkbox_env(), @@ -755,6 +756,7 @@ def get_raw_record_value(self, name, default=None): value is not None and self.template_engine == "jinja2" and not self.is_parametric + and not self.unit == "template" ): tmp_params = { "__checkbox_env__": self._checkbox_env(), diff --git a/docs/reference/units/template.rst b/docs/reference/units/template.rst index 296ef0c785..eeb013defc 100644 --- a/docs/reference/units/template.rst +++ b/docs/reference/units/template.rst @@ -4,21 +4,21 @@ Template Unit ============= -The template unit is a variant of Plainbox unit types. A template is a skeleton +The template unit is a variant of Checkbox unit types. A template is a skeleton for defining additional units, typically job definitions. A template is defined -as a typical RFC822-like Plainbox unit (like a typical job definition) with the +as a typical RFC822-like Checkbox unit (like a typical job definition) with the exception that all the fields starting with the string ``template-`` are reserved for the template itself while all the other fields are a definition of all the eventual instances of the template. -There is one particular requirement on the job's ``id`` field for the instances -to be generated. 
This ``id`` field value must be template-based, for example :: +There are two requirements for the job's ``id`` field for the instances to +be generated. It must be unique, and it must be template-based, for example:: unit: template template-resource: graphics_card template-engine: jinja2 template-unit: job - id: chromium_webcam_encoding_{{driver}}_{{bus}} + id: chromium_webcam_encoding_{{ driver }}_{{ bus }} instead of:: @@ -29,13 +29,21 @@ instead of:: id: chromium_webcam_encoding Template-Specific Fields ------------------------- +======================== -There are four fields that are specific to the template unit: +.. _Template template-id field: + +``template-id`` + Unique identifier for this template. + + This field is optional. If absent, a ``template-id`` value will be computed + from the ``id`` field. For instance, if the ``id`` field is + ``stress/reboot_{iterations}_times``, the computed ``template-id`` field + will be ``stress/reboot_iterations_times``. .. _Template template-unit field: -``template-unit``: +``template-unit`` Name of the unit type this template will generate. By default job definition units are generated (as if the field was specified with the value of ``job``) eventually but other values may be used as well. @@ -44,7 +52,7 @@ There are four fields that are specific to the template unit: .. _Template template-resource field: -``template-resource``: +``template-resource`` Name of the resource job (if it is a compatible resource identifier) to use to parametrize the template. This must either be a name of a resource job available in the namespace the template unit belongs to *or* a valid @@ -55,7 +63,7 @@ There are four fields that are specific to the template unit: .. _Template template-imports field: -``template-imports``: +``template-imports`` A resource import statement. It can be used to refer to arbitrary resource job by its full identifier and (optionally) give it a short variable name. 
@@ -75,7 +83,7 @@ There are four fields that are specific to the template unit: .. _Template template-filter field: -``template-filter``: +``template-filter`` A resource program that limits the set of records from which template instances will be made. The syntax of this field is the same as the syntax of typical job definition unit's ``requires`` field, that is, it is a @@ -89,14 +97,14 @@ There are four fields that are specific to the template unit: .. _Template template-engine field: -``template-engine``: +``template-engine`` Name of the template engine to use, default is python string formatting - (See PEP 3101). Currently the only other supported engine is jinja2. + (See PEP 3101). The only other supported engine is ``jinja2``. This field is optional. Instantiation -------------- +============= When a template is instantiated, a single record object is used to fill in the parametric values to all the applicable fields. Each field is formatted using @@ -167,19 +175,24 @@ hard drive available on the system:: block_device.{name}_state != 'removable' user: root command: disk_stats_test {name} - _description: This test checks {name} disk stats, generates some activity and rechecks stats to verify they've changed. It also verifies that disks appear in the various files they're supposed to. + _description: This test checks {name} disk stats, generates some activity + and rechecks stats to verify they've changed. It also verifies that disks + appear in the various files they're supposed to. The ``template-resource`` used here (``device``) refers to a resource job using the ``udev_resource`` script to get information about the system. The ``udev_resource`` script returns a list of items with attributes such as ``path`` and ``name``, so we can use these directly in our template. -``block_device`` is an other resource unit used for setting a requirement on the state of the current device. 
+``block_device`` is an other resource unit used for setting a requirement +on the state of the current device. Simple Jinja templates example ------------------------------ -Jinja2 can be used as the templating engine instead of python string formatting. This allows the author to access some powerful templating features including expressions. +Jinja2 can be used as the templating engine instead of python string +formatting. This allows the author to access some powerful templating features +including expressions. First here is the previous disk stats example converted to jinja2:: @@ -195,18 +208,22 @@ First here is the previous disk stats example converted to jinja2:: block_device.{{ name }}_state != 'removable' user: root command: disk_stats_test {{ name }} - _description: This test checks {{ name }} disk stats, generates some activity and rechecks stats to verify they've changed. It also verifies that disks appear in the various files they're supposed to. + _description: This test checks {{ name }} disk stats, generates some + activity and rechecks stats to verify they've changed. It also verifies + that disks appear in the various files they're supposed to. Template engine additional features ------------------------------------ +=================================== -Plainbox populates the template parameter dictionary with some extra keys to aid the author. +Checkbox populates the template parameter dictionary with some extra keys +to aid the author. ``__index__``: If a template unit can result in N content jobs then this variable is equal to how many jobs have been created so far. 
-Following parameters are only available for ``template-engine``: ``jinja2``: +Following parameters are only available for templates based on the Jinja2 +engine (see :ref:`Template template-engine field`): ``__system_env__``: When checkbox encounters a template to render it will populate this From ebb12d0df08f4fec0fa3200b9f79d214a835dfa0 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Wed, 14 Feb 2024 17:38:40 +0100 Subject: [PATCH 025/108] Implement ExpectNot operator for metabox (infra) (#987) * Draft implementation of expect_not Minor: small refactor of expect * Propagate the api --- metabox/metabox/core/actions.py | 4 +++ metabox/metabox/core/lxd_execute.py | 48 +++++++++++++++++------------ metabox/metabox/core/scenario.py | 5 +++ 3 files changed, 37 insertions(+), 20 deletions(-) diff --git a/metabox/metabox/core/actions.py b/metabox/metabox/core/actions.py index e1032b2c3f..cd2388cb5f 100644 --- a/metabox/metabox/core/actions.py +++ b/metabox/metabox/core/actions.py @@ -49,6 +49,10 @@ class Expect(ActionBase): handler = 'expect' +class ExpectNot(ActionBase): + handler = 'expect_not' + + class Send(ActionBase): handler = 'send' diff --git a/metabox/metabox/core/lxd_execute.py b/metabox/metabox/core/lxd_execute.py index 5dc864a49a..35b4b79aa5 100644 --- a/metabox/metabox/core/lxd_execute.py +++ b/metabox/metabox/core/lxd_execute.py @@ -47,10 +47,12 @@ def __init__(self, *args, **kwargs): self.stdout_lock = threading.Lock() self._new_data = False self._lookup_by_id = False + self._connection_closed = False def received_message(self, message): if len(message.data) == 0: self.close() + self._connection_closed = True if self.verbose: raw_msg = self.ansi_escape.sub( "", message.data.decode("utf-8", errors="ignore") @@ -62,31 +64,36 @@ def received_message(self, message): self._new_data = True def expect(self, data, timeout=0): - not_found = True + found = False start_time = time.time() - while not_found: + if isinstance(data, str): + data = data.encode("utf-8") + 
while not found: time.sleep(0.1) - if type(data) != str: - check = data.search(self.stdout_data) - else: - check = data.encode("utf-8") in self.stdout_data + check = data in self.stdout_data if check: + # truncate the history because subsequent expect should not + # re-match the same text with self.stdout_lock: - if type(data) != str: - self.stdout_data = data.split(self.stdout_data) - else: - self.stdout_data = self.stdout_data.split( - data.encode("utf-8"), maxsplit=1 - )[-1] - not_found = False - if timeout and time.time() > start_time + timeout: - logger.warning( - "'{}' not found! Timeout is reached (set to {})", - data, - timeout, + self.stdout_data = self.stdout_data.split( + data, maxsplit=1 + )[-1] + found = True + elif timeout and time.time() > start_time + timeout: + msg = "'{}' not found! Timeout is reached (set to {})".format( + data, timeout ) - raise TimeoutError - return not_found is False + logger.warning(msg) + raise TimeoutError(msg) + elif self._connection_closed: + # this could have been updated from the other thread, lets + # check before exiting the loop + found = found or data in self.stdout_data + break + return found + + def expect_not(self, data, timeout=0): + return not self.expect(data, timeout) def select_test_plan(self, data, timeout=0): if not self._lookup_by_id: @@ -199,6 +206,7 @@ def interactive_execute(container, cmd, env={}, verbose=False, timeout=0): ws_urls = container.raw_interactive_execute( login_shell + env_wrapper(env) + shlex.split(cmd) ) + base_websocket_url = container.client.websocket_url ctl = WebSocketClient(base_websocket_url) ctl.resource = ws_urls["control"] diff --git a/metabox/metabox/core/scenario.py b/metabox/metabox/core/scenario.py index 992b3cb67e..bd0e860576 100644 --- a/metabox/metabox/core/scenario.py +++ b/metabox/metabox/core/scenario.py @@ -218,6 +218,11 @@ def expect(self, data, timeout=60): outcome = self._pts.expect(data, timeout) self._checks.append(outcome) + def expect_not(self, data, 
timeout=60): + assert self._pts is not None + outcome = self._pts.expect_not(data, timeout) + self._checks.append(outcome) + def send(self, data): assert self._pts is not None self._pts.send(data.encode("utf-8"), binary=True) From 1237d7811d1bd805fa913b22f8a619cf4874244a Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Thu, 15 Feb 2024 09:55:28 +0100 Subject: [PATCH 026/108] Patch back in regex support (infra) (#988) Patch back in regex support --- metabox/metabox/core/lxd_execute.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/metabox/metabox/core/lxd_execute.py b/metabox/metabox/core/lxd_execute.py index 35b4b79aa5..d4c190bb31 100644 --- a/metabox/metabox/core/lxd_execute.py +++ b/metabox/metabox/core/lxd_execute.py @@ -68,15 +68,28 @@ def expect(self, data, timeout=0): start_time = time.time() if isinstance(data, str): data = data.encode("utf-8") + + try: + # custom classes, like _re, provide this functionality and are a + # valid input + search = data.search + split = data.split + except AttributeError: + + def search(stream): + return data in stream + + split = bytes.split + while not found: time.sleep(0.1) - check = data in self.stdout_data + check = search(self.stdout_data) if check: # truncate the history because subsequent expect should not # re-match the same text with self.stdout_lock: - self.stdout_data = self.stdout_data.split( - data, maxsplit=1 + self.stdout_data = split( + self.stdout_data, data, maxsplit=1 )[-1] found = True elif timeout and time.time() > start_time + timeout: From 9c26bae0b762c29ae123c8df3b12e64253b8ede7 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Thu, 15 Feb 2024 15:55:58 +0100 Subject: [PATCH 027/108] Fix metabox expect typerror (infra) (#990) * Fix metabox expect typerror * _re now uses strings * Remove pointless stdin_payload * Actually remove pointless payload --- metabox/metabox/core/lxd_execute.py | 72 ++++++++++++++++------------- metabox/metabox/core/scenario.py | 4 +- 
metabox/metabox/core/utils.py | 2 +- 3 files changed, 42 insertions(+), 36 deletions(-) diff --git a/metabox/metabox/core/lxd_execute.py b/metabox/metabox/core/lxd_execute.py index d4c190bb31..d861f2d60d 100644 --- a/metabox/metabox/core/lxd_execute.py +++ b/metabox/metabox/core/lxd_execute.py @@ -26,6 +26,7 @@ import metabox.core.keys as keys from metabox.core.utils import ExecuteResult from ws4py.client.threadedclient import WebSocketClient +from metabox.core.utils import _re base_env = { @@ -42,8 +43,8 @@ class InteractiveWebsocket(WebSocketClient): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.stdout_data = bytearray() - self.stdout_data_full = bytearray() + self.stdout_data: str = "" + self.stdout_data_full: str = "" self.stdout_lock = threading.Lock() self._new_data = False self._lookup_by_id = False @@ -53,33 +54,41 @@ def received_message(self, message): if len(message.data) == 0: self.close() self._connection_closed = True + message_data_str = message.data.decode("utf-8", errors="ignore") if self.verbose: - raw_msg = self.ansi_escape.sub( - "", message.data.decode("utf-8", errors="ignore") - ) + raw_msg = self.ansi_escape.sub("", message_data_str) logger.trace(raw_msg.rstrip()) with self.stdout_lock: - self.stdout_data += message.data - self.stdout_data_full += message.data + self.stdout_data += message_data_str + self.stdout_data_full += message_data_str self._new_data = True - def expect(self, data, timeout=0): - found = False - start_time = time.time() - if isinstance(data, str): - data = data.encode("utf-8") + def get_search_split(self, search_pattern): + if isinstance(search_pattern, _re): + search = search_pattern.search + split_first = search_pattern.split + elif isinstance(search_pattern, str): - try: - # custom classes, like _re, provide this functionality and are a - # valid input - search = data.search - split = data.split - except AttributeError: + def search(buffer): + return search_pattern in buffer - def 
search(stream): - return data in stream + def split_first(buffer): + return buffer.split(search_pattern, maxsplit=1) + + else: + raise TypeError( + "Unsupported search pattern type: {}".format( + type(search_pattern) + ) + ) + + return (search, split_first) + + def expect(self, pattern, timeout=0): + found = False + start_time = time.time() - split = bytes.split + search, split_first = self.get_search_split(pattern) while not found: time.sleep(0.1) @@ -88,20 +97,18 @@ def search(stream): # truncate the history because subsequent expect should not # re-match the same text with self.stdout_lock: - self.stdout_data = split( - self.stdout_data, data, maxsplit=1 - )[-1] + self.stdout_data = split_first(self.stdout_data)[-1] found = True elif timeout and time.time() > start_time + timeout: msg = "'{}' not found! Timeout is reached (set to {})".format( - data, timeout + pattern, timeout ) logger.warning(msg) raise TimeoutError(msg) elif self._connection_closed: # this could have been updated from the other thread, lets # check before exiting the loop - found = found or data in self.stdout_data + found = found or search(self.stdout_data) break return found @@ -118,19 +125,19 @@ def select_test_plan(self, data, timeout=0): max_attemps = 10 attempt = 0 still_on_first_screen = True - old_stdout_data = b"" + old_stdout_data = "" if len(data) > 67: data = data[:67] + " │\r\n│ " + data[67:] while attempt < max_attemps: if self._new_data and self.stdout_data: if old_stdout_data == self.stdout_data: break - check = data.encode("utf-8") in self.stdout_data + check = data in self.stdout_data if not check: self._new_data = False with self.stdout_lock: old_stdout_data = self.stdout_data - self.stdout_data = bytearray() + self.stdout_data = "" stdin_payload = keys.KEY_PAGEDOWN + keys.KEY_SPACE self.send(stdin_payload.encode("utf-8"), binary=True) still_on_first_screen = False @@ -150,18 +157,18 @@ def select_test_plan(self, data, timeout=0): self.send(keys.KEY_PAGEDOWN.encode("utf-8"), 
binary=True) while attempt < max_attemps: if self._new_data and self.stdout_data: - check = data.encode("utf-8") in self.stdout_data + check = data in self.stdout_data if not check: self._new_data = False with self.stdout_lock: - self.stdout_data = bytearray() + self.stdout_data = "" stdin_payload = keys.KEY_UP + keys.KEY_SPACE self.send(stdin_payload.encode("utf-8"), binary=True) attempt = 0 else: not_found = False with self.stdout_lock: - self.stdout_data = bytearray() + self.stdout_data = "" break else: time.sleep(0.1) @@ -260,7 +267,6 @@ def on_stderr(msg): + shlex.split(cmd), # noqa 503 stdout_handler=on_stdout, stderr_handler=on_stderr, - stdin_payload=open(__file__), ) if timeout and res.exit_code == 137: logger.warning("{} Timeout is reached (set to {})", cmd, timeout) diff --git a/metabox/metabox/core/scenario.py b/metabox/metabox/core/scenario.py index bd0e860576..dd8ac16f2a 100644 --- a/metabox/metabox/core/scenario.py +++ b/metabox/metabox/core/scenario.py @@ -79,7 +79,7 @@ def __init__( def get_output_streams(self): if self._pts: - return self._pts.stdout_data_full.decode("utf-8") + return self._pts.stdout_data_full return self._outstr_full def has_passed(self): @@ -148,7 +148,7 @@ def assert_not_printed(self, pattern): regex = re.compile(pattern) if self._pts: found = regex.search( - self._pts.stdout_data_full.decode("utf-8", errors="ignore") + self._pts.stdout_data_full ) else: found = regex.search(self._stdout) or regex.search(self._stderr) diff --git a/metabox/metabox/core/utils.py b/metabox/metabox/core/utils.py index 68ee551854..196e93c37c 100644 --- a/metabox/metabox/core/utils.py +++ b/metabox/metabox/core/utils.py @@ -42,7 +42,7 @@ class ExecuteResult(NamedTuple): class _re: def __init__(self, pattern, flags=0): self._raw_pattern = pattern - self._pattern = re.compile(pattern.encode("utf-8"), flags) + self._pattern = re.compile(pattern, flags) def __repr__(self): return f"Regex {self._raw_pattern}" From c345eee07c9d04d5406fe466300efd32fa150b9f 
Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Thu, 15 Feb 2024 16:26:41 +0100 Subject: [PATCH 028/108] Fix quit option for manual jobs (bugfix) (#989) * Fix quit * Add new ui quit metabox test Minor: renamed urwidui -> ui * Test RemoteController change * Test also remote assistant remember_users_response --- .../checkbox_ng/launcher/controller.py | 6 + .../checkbox_ng/launcher/test_controller.py | 148 +++++++++++++++++- .../plainbox/impl/session/remote_assistant.py | 5 + .../impl/session/test_remote_assistant.py | 30 ++++ .../scenarios/{urwid => ui}/__init__.py | 0 metabox/metabox/scenarios/ui/interact_jobs.py | 56 +++++++ .../scenarios/{urwid => ui}/testplan.py | 0 7 files changed, 244 insertions(+), 1 deletion(-) rename metabox/metabox/scenarios/{urwid => ui}/__init__.py (100%) create mode 100644 metabox/metabox/scenarios/ui/interact_jobs.py rename metabox/metabox/scenarios/{urwid => ui}/testplan.py (100%) diff --git a/checkbox-ng/checkbox_ng/launcher/controller.py b/checkbox-ng/checkbox_ng/launcher/controller.py index c3aacfd5d8..5df25e5e2e 100644 --- a/checkbox-ng/checkbox_ng/launcher/controller.py +++ b/checkbox-ng/checkbox_ng/launcher/controller.py @@ -726,6 +726,9 @@ def _run_jobs(self, jobs_repr, total_num=0): ) if cmd == "skip": next_job = True + elif cmd == "quit": + self.sa.remember_users_response(cmd) + raise SystemExit("Session saved, exiting...") self.sa.remember_users_response(cmd) self.wait_for_job(dont_finish=True) elif interaction.kind in "steps": @@ -738,6 +741,9 @@ def _run_jobs(self, jobs_repr, total_num=0): ) if cmd == "skip": next_job = True + elif cmd == "quit": + self.sa.remember_users_response(cmd) + raise SystemExit("Session saved, exiting...") self.sa.remember_users_response(cmd) elif interaction.kind == "verification": self.wait_for_job(dont_finish=True) diff --git a/checkbox-ng/checkbox_ng/launcher/test_controller.py b/checkbox-ng/checkbox_ng/launcher/test_controller.py index e5fb8f3c1d..5cafe9f747 100644 --- 
a/checkbox-ng/checkbox_ng/launcher/test_controller.py +++ b/checkbox-ng/checkbox_ng/launcher/test_controller.py @@ -252,6 +252,152 @@ def test_resume_or_start_new_session_interactive(self): self.assertTrue(self_mock.interactively_choose_tp.called) + @mock.patch("checkbox_ng.launcher.controller.SimpleUI") + def test__run_jobs_description_command_none(self, simple_ui_mock): + self_mock = mock.MagicMock() + interaction_mock = mock.MagicMock() + interaction_mock.kind = "description" + + self_mock.sa.run_job.return_value = [interaction_mock] + jobs_repr_mock = { + "id": "id_mock", + "command": None, + "num": 0, + "name": "name", + "category_name": "category", + } + simple_ui_mock().wait_for_interaction_prompt.return_value = "skip" + + RemoteController._run_jobs(self_mock, [jobs_repr_mock]) + + @mock.patch("checkbox_ng.launcher.controller.SimpleUI") + def test__run_jobs_description_skip(self, simple_ui_mock): + self_mock = mock.MagicMock() + interaction_mock = mock.MagicMock() + interaction_mock.kind = "description" + + self_mock.sa.run_job.return_value = [interaction_mock] + jobs_repr_mock = { + "id": "id_mock", + "command": "skip_description", + "num": 0, + "name": "name", + "category_name": "category", + } + simple_ui_mock().wait_for_interaction_prompt.return_value = "skip" + + RemoteController._run_jobs(self_mock, [jobs_repr_mock]) + + @mock.patch("checkbox_ng.launcher.controller.SimpleUI") + def test__run_jobs_description_enter(self, simple_ui_mock): + self_mock = mock.MagicMock() + interaction_mock = mock.MagicMock() + interaction_mock.kind = "description" + + self_mock.sa.run_job.return_value = [interaction_mock] + jobs_repr_mock = { + "id": "id_mock", + "command": "skip_description", + "num": 0, + "name": "name", + "category_name": "category", + } + simple_ui_mock().wait_for_interaction_prompt.return_value = "" + + RemoteController._run_jobs(self_mock, [jobs_repr_mock]) + + @mock.patch("checkbox_ng.launcher.controller.SimpleUI") + def 
test__run_jobs_description_quit(self, simple_ui_mock): + self_mock = mock.MagicMock() + interaction_mock = mock.MagicMock() + interaction_mock.kind = "description" + + self_mock.sa.run_job.return_value = [interaction_mock] + jobs_repr_mock = { + "id": "id_mock", + "command": "quit_description", + "num": 0, + "name": "name", + "category_name": "category", + } + simple_ui_mock().wait_for_interaction_prompt.return_value = "quit" + + with self.assertRaises(SystemExit): + RemoteController._run_jobs(self_mock, [jobs_repr_mock]) + + @mock.patch("checkbox_ng.launcher.controller.SimpleUI") + def test__run_jobs_steps_run(self, simple_ui_mock): + self_mock = mock.MagicMock() + interaction_mock = mock.MagicMock() + interaction_mock.kind = "steps" + + self_mock.sa.run_job.return_value = [interaction_mock] + jobs_repr_mock = { + "id": "id_mock", + "command": None, + "num": 0, + "name": "name", + "category_name": "category", + } + + RemoteController._run_jobs(self_mock, [jobs_repr_mock]) + + @mock.patch("checkbox_ng.launcher.controller.SimpleUI") + def test__run_jobs_steps_enter(self, simple_ui_mock): + self_mock = mock.MagicMock() + interaction_mock = mock.MagicMock() + interaction_mock.kind = "steps" + + self_mock.sa.run_job.return_value = [interaction_mock] + jobs_repr_mock = { + "id": "id_mock", + "command": "skip_description", + "num": 0, + "name": "name", + "category_name": "category", + } + simple_ui_mock().wait_for_interaction_prompt.return_value = "" + + RemoteController._run_jobs(self_mock, [jobs_repr_mock]) + + @mock.patch("checkbox_ng.launcher.controller.SimpleUI") + def test__run_jobs_steps_skip(self, simple_ui_mock): + self_mock = mock.MagicMock() + interaction_mock = mock.MagicMock() + interaction_mock.kind = "steps" + + self_mock.sa.run_job.return_value = [interaction_mock] + jobs_repr_mock = { + "id": "id_mock", + "command": "skip_description", + "num": 0, + "name": "name", + "category_name": "category", + } + 
simple_ui_mock().wait_for_interaction_prompt.return_value = "skip" + + RemoteController._run_jobs(self_mock, [jobs_repr_mock]) + + @mock.patch("checkbox_ng.launcher.controller.SimpleUI") + def test__run_jobs_steps_quit(self, simple_ui_mock): + self_mock = mock.MagicMock() + interaction_mock = mock.MagicMock() + interaction_mock.kind = "steps" + + self_mock.sa.run_job.return_value = [interaction_mock] + jobs_repr_mock = { + "id": "id_mock", + "command": "quit_description", + "num": 0, + "name": "name", + "category_name": "category", + } + simple_ui_mock().wait_for_interaction_prompt.return_value = "quit" + + with self.assertRaises(SystemExit): + RemoteController._run_jobs(self_mock, [jobs_repr_mock]) + + class IsHostnameALoopbackTests(TestCase): @mock.patch("socket.gethostbyname") @mock.patch("ipaddress.ip_address") @@ -294,4 +440,4 @@ def test_is_hostname_a_loopback_socket_raises(self, gethostbyname_mock): when the socket.gethostname function raises an exception """ gethostbyname_mock.side_effect = socket.gaierror - self.assertFalse(is_hostname_a_loopback("foobar")) \ No newline at end of file + self.assertFalse(is_hostname_a_loopback("foobar")) diff --git a/checkbox-ng/plainbox/impl/session/remote_assistant.py b/checkbox-ng/plainbox/impl/session/remote_assistant.py index c637f665e4..1785f67dda 100644 --- a/checkbox-ng/plainbox/impl/session/remote_assistant.py +++ b/checkbox-ng/plainbox/impl/session/remote_assistant.py @@ -213,6 +213,11 @@ def remember_users_response(self, response): self._current_comments = "" self._state = TestsSelected return + elif response == "quit": + self._last_response = response + self._state = Idle + self.finalize_session() + return self._last_response = response self._state = Running diff --git a/checkbox-ng/plainbox/impl/session/test_remote_assistant.py b/checkbox-ng/plainbox/impl/session/test_remote_assistant.py index 62a7b5eef9..760eeaa054 100644 --- a/checkbox-ng/plainbox/impl/session/test_remote_assistant.py +++ 
b/checkbox-ng/plainbox/impl/session/test_remote_assistant.py @@ -273,6 +273,36 @@ def test_resume_by_id_with_result_file_not_json(self, mock_load_configs): rsa._sa.use_job_result.assert_called_with(rsa._last_job, mjr, True) + def test_remember_users_response_quit(self): + self_mock = mock.MagicMock() + self_mock._state = remote_assistant.Interacting + + remote_assistant.RemoteSessionAssistant.remember_users_response( + self_mock, "quit" + ) + + self.assertEqual(self_mock._state, remote_assistant.Idle) + self.assertTrue(self_mock.finalize_session.called) + + def test_remember_users_response_rollback(self): + self_mock = mock.MagicMock() + self_mock._state = remote_assistant.Interacting + + remote_assistant.RemoteSessionAssistant.remember_users_response( + self_mock, "rollback" + ) + + self.assertEqual(self_mock._state, remote_assistant.TestsSelected) + + def test_remember_users_response_run(self): + self_mock = mock.MagicMock() + self_mock._state = remote_assistant.Interacting + + remote_assistant.RemoteSessionAssistant.remember_users_response( + self_mock, "run" + ) + + self.assertEqual(self_mock._state, remote_assistant.Running) class RemoteAssistantFinishJobTests(TestCase): def setUp(self): diff --git a/metabox/metabox/scenarios/urwid/__init__.py b/metabox/metabox/scenarios/ui/__init__.py similarity index 100% rename from metabox/metabox/scenarios/urwid/__init__.py rename to metabox/metabox/scenarios/ui/__init__.py diff --git a/metabox/metabox/scenarios/ui/interact_jobs.py b/metabox/metabox/scenarios/ui/interact_jobs.py new file mode 100644 index 0000000000..f8dd646ec9 --- /dev/null +++ b/metabox/metabox/scenarios/ui/interact_jobs.py @@ -0,0 +1,56 @@ +# This file is part of Checkbox. +# +# Copyright 2024 Canonical Ltd. +# Written by: +# Massimiliano Girardi +# +# Checkbox is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, +# as published by the Free Software Foundation. 
+ +# +# Checkbox is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Checkbox. If not, see . +import textwrap + +import metabox.core.keys as keys +from metabox.core.actions import ( + Expect, + Send, + Start, + ExpectNot +) +from metabox.core.scenario import Scenario +from metabox.core.utils import tag + + +@tag("manual", "interact") +class ManualInteractQuit(Scenario): + launcher = textwrap.dedent( + """ + [launcher] + launcher_version = 1 + stock_reports = text + [test plan] + unit = 2021.com.canonical.certification::cert-blocker-manual-resume + forced = yes + [test selection] + forced = yes + """ + ) + + steps = [ + Start(), + Expect("Pick an action"), + Send("p" + keys.KEY_ENTER), + Expect("save the session and quit"), + Send("q" + keys.KEY_ENTER), + # if q is pressed, checkbox should exit instead of going ahead printing + # results + ExpectNot("Results") + ] diff --git a/metabox/metabox/scenarios/urwid/testplan.py b/metabox/metabox/scenarios/ui/testplan.py similarity index 100% rename from metabox/metabox/scenarios/urwid/testplan.py rename to metabox/metabox/scenarios/ui/testplan.py From dffecdc8e0e7da75f1ccdff6c2b5f4755a64ae88 Mon Sep 17 00:00:00 2001 From: Pierre Equoy Date: Mon, 19 Feb 2024 15:34:24 +0800 Subject: [PATCH 029/108] Add template-summary and template-description fields to the Template Unit (New) (#985) * Add template-summary and template-description fields and validators Add two fields for the Template Unit: - template-summary - template-description These fields mimick the summary and description fields of the Job Unit to provide a quick explanation and a longer form one about what a template unit does. 
Both these fields are translatable, and validation is added for the template-summary field to make sure if a value is provided, it is one line only and is less than 80 characters. * Add unit tests for the new Template Unit fields * Update Template Unit documentation * Add unit tests for the translation methods of the new fields --- checkbox-ng/plainbox/impl/unit/template.py | 56 +++++++++++++ .../plainbox/impl/unit/test_template.py | 80 +++++++++++++++++++ docs/reference/units/template.rst | 18 +++++ 3 files changed, 154 insertions(+) diff --git a/checkbox-ng/plainbox/impl/unit/template.py b/checkbox-ng/plainbox/impl/unit/template.py index 3df94fc07e..8941138ff9 100644 --- a/checkbox-ng/plainbox/impl/unit/template.py +++ b/checkbox-ng/plainbox/impl/unit/template.py @@ -27,6 +27,7 @@ from plainbox.i18n import gettext as _ from plainbox.i18n import gettext_noop as N_ +from plainbox.impl.decorators import instance_method_lru_cache from plainbox.impl.resource import ExpressionFailedError from plainbox.impl.resource import Resource from plainbox.impl.resource import ResourceProgram @@ -39,10 +40,12 @@ from plainbox.impl.unit.unit_with_id import UnitWithId from plainbox.impl.unit.unit_with_id import UnitWithIdValidator from plainbox.impl.unit.validators import CorrectFieldValueValidator +from plainbox.impl.unit.validators import PresentFieldValidator from plainbox.impl.unit.validators import ReferenceConstraint from plainbox.impl.unit.validators import UnitReferenceValidator from plainbox.impl.unit.validators import UniqueValueValidator from plainbox.impl.validation import Problem +from plainbox.impl.validation import Severity __all__ = ['TemplateUnit'] @@ -295,6 +298,40 @@ def template_imports(self): """ return self.get_record_value('template-imports') + @property + def template_summary(self): + """ + Value of the 'template-summary' field. + + This attribute stores the summary of a template, that is a human + readable name for that template. 
+ """ + return self.get_record_value("template-summary") + + @instance_method_lru_cache(maxsize=None) + def tr_template_summary(self): + """ + Get the translated version of :meth:`template_summary`. + """ + return self.get_translated_record_value("template-summary") + + @property + def template_description(self): + """ + Value of the 'template-description' field. + + This attribute stores the definition of a template which can be used + to provide more information about this template. + """ + return self.get_record_value("template-description") + + @instance_method_lru_cache(maxsize=None) + def tr_template_description(self): + """ + Get the translated version of :meth:`template_description`. + """ + return self.get_translated_record_value("template-description") + @property def template_unit(self): """ @@ -488,6 +525,8 @@ class fields(SymbolDef): """Symbols for each field that a TemplateUnit can have.""" template_id = "template-id" + template_summary = "template-summary" + template_description = "template-description" template_unit = 'template-unit' template_resource = 'template-resource' template_filter = 'template-filter' @@ -507,6 +546,23 @@ class fields(SymbolDef): message=_("identifier cannot define a custom namespace"), onlyif=lambda unit: unit.get_record_value("template-id")), ], + fields.template_summary: [ + concrete_validators.translatable, + PresentFieldValidator(severity=Severity.advice), + CorrectFieldValueValidator( + lambda field: field.count("\n") == 0, + Problem.wrong, Severity.warning, + message=_("please use only one line"), + onlyif=lambda unit: unit.template_summary), + CorrectFieldValueValidator( + lambda field: len(field) <= 80, + Problem.wrong, Severity.warning, + message=_("please stay under 80 characters"), + onlyif=lambda unit: unit.template_summary) + ], + fields.template_description: [ + concrete_validators.translatable, + ], fields.template_unit: [ concrete_validators.untranslatable, ], diff --git 
a/checkbox-ng/plainbox/impl/unit/test_template.py b/checkbox-ng/plainbox/impl/unit/test_template.py index 180ddb2708..ead808f35d 100644 --- a/checkbox-ng/plainbox/impl/unit/test_template.py +++ b/checkbox-ng/plainbox/impl/unit/test_template.py @@ -223,6 +223,42 @@ def test_template_id__precedence_jinja2(self): "id": "job_id_{{ param }}", }).template_id, "template_id") + def test_template_summary(self): + self.assertEqual(TemplateUnit({ + "template-summary": "summary", + }).template_summary, "summary") + + def test_template_description(self): + self.assertEqual(TemplateUnit({ + "template-description": "description", + }).template_description, "description") + + def test_tr_template_summary(self): + template = TemplateUnit({ + "_template-summary": "summary", + }) + self.assertEqual(template.tr_template_summary(), "summary") + + def test_translated_template_summary(self): + """Ensure template_summary is populated with the translated field.""" + self.assertEqual(TemplateUnit({ + "_template-summary": "summary", + }).template_summary, "summary") + + def test_tr_template_description(self): + template = TemplateUnit({ + "_template-description": "description", + }) + self.assertEqual(template.tr_template_description(), "description") + + def test_translated_template_description(self): + """ + Ensure template_description is populated with the translated field. 
+ """ + self.assertEqual(TemplateUnit({ + "_template-description": "description", + }).template_description, "description") + def test_template_resource__empty(self): self.assertEqual(TemplateUnit({}).template_resource, None) @@ -496,6 +532,50 @@ def test_template_unit__untranslatable(self): issue_list, self.unit_cls.Meta.fields.template_unit, Problem.unexpected_i18n, Severity.warning) + def test_template_summary__translatable(self): + issue_list = self.unit_cls({ + 'template-summary': 'template_summary' + }, provider=self.provider).check() + self.assertIssueFound(issue_list, + self.unit_cls.Meta.fields.template_summary, + Problem.expected_i18n, + Severity.warning) + + def test_template_summary__present(self): + issue_list = self.unit_cls({ + }, provider=self.provider).check() + self.assertIssueFound(issue_list, + self.unit_cls.Meta.fields.template_summary, + Problem.missing, + Severity.advice) + + def test_template_summary__one_line(self): + issue_list = self.unit_cls({ + 'template-summary': 'line1\nline2' + }, provider=self.provider).check() + self.assertIssueFound(issue_list, + self.unit_cls.Meta.fields.template_summary, + Problem.wrong, + Severity.warning) + + def test_template_summary__short_line(self): + issue_list = self.unit_cls({ + 'template-summary': 'x' * 81 + }, provider=self.provider).check() + self.assertIssueFound(issue_list, + self.unit_cls.Meta.fields.template_summary, + Problem.wrong, + Severity.warning) + + def test_template_description__translatable(self): + issue_list = self.unit_cls({ + 'template-description': 'template_description' + }, provider=self.provider).check() + self.assertIssueFound(issue_list, + self.unit_cls.Meta.fields.template_description, + Problem.expected_i18n, + Severity.warning) + def test_template_resource__untranslatable(self): issue_list = self.unit_cls({ '_template-resource': 'template_resource' diff --git a/docs/reference/units/template.rst b/docs/reference/units/template.rst index eeb013defc..109e99f088 100644 --- 
a/docs/reference/units/template.rst +++ b/docs/reference/units/template.rst @@ -41,6 +41,24 @@ Template-Specific Fields ``stress/reboot_{iterations}_times``, the computed ``template-id`` field will be ``stress/reboot_iterations_times``. +.. _Template template-summary field: + +``template-summary`` + A human readable name for the template. This value is available for + translation into other languages. It must be one line long, ideally it + should be short (50-70 characters max). + + This field is optional (Checkbox will only advise you to provide one when + running provider validation). + +.. _Template template-description field: + +``template-description`` + A long form description of what the template does or the kind of jobs it + instantiates. This value is available for translation into other languages. + + This field is optional. + .. _Template template-unit field: ``template-unit`` From 58fb0c37503712ded8009b2e5f68cf2f0e532a07 Mon Sep 17 00:00:00 2001 From: Pierre Equoy Date: Mon, 19 Feb 2024 16:17:55 +0800 Subject: [PATCH 030/108] Add template-id field to instantiated jobs (New) (#994) Add template-id field to instantiated jobs Pass the `template-id` field from the template to the instantiated jobs in order to track their origin. Fix CHECKBOX-1076 --- checkbox-ng/plainbox/impl/unit/job.py | 13 ++++++++++ checkbox-ng/plainbox/impl/unit/template.py | 4 ++- .../plainbox/impl/unit/test_template.py | 26 +++++++++++++++++++ 3 files changed, 42 insertions(+), 1 deletion(-) diff --git a/checkbox-ng/plainbox/impl/unit/job.py b/checkbox-ng/plainbox/impl/unit/job.py index a4a33a1a26..4b62bde069 100644 --- a/checkbox-ng/plainbox/impl/unit/job.py +++ b/checkbox-ng/plainbox/impl/unit/job.py @@ -374,6 +374,19 @@ def category_id(self): self.get_record_value( 'category_id', 'com.canonical.plainbox::uncategorised')) + @cached_property + def template_id(self): + """ + Fully qualified identifier of the template this job is instantiated + from. + + .. 
note:: + This field should only be present in jobs instantiated from + templates. It should not be used when writing jobs manually. + Therefore, it is not described in the user-facing documentation. + """ + return self.get_record_value("template-id") + @propertywithsymbols(symbols=_AutoRetryValues) def auto_retry(self): """ diff --git a/checkbox-ng/plainbox/impl/unit/template.py b/checkbox-ng/plainbox/impl/unit/template.py index 8941138ff9..ac65fa6ed4 100644 --- a/checkbox-ng/plainbox/impl/unit/template.py +++ b/checkbox-ng/plainbox/impl/unit/template.py @@ -453,9 +453,11 @@ def instantiate_one(self, resource, unit_cls_hint=None, index=0): key: value for key, value in self._raw_data.items() if not key.startswith('template-') } - # Only keep the template-engine field + # Only keep template-engine and template-id fields raw_data['template-engine'] = self.template_engine data['template-engine'] = raw_data['template-engine'] + raw_data["template-id"] = self.template_id + data["template-id"] = raw_data["template-id"] # Override the value of the 'unit' field from 'template-unit' field data['unit'] = raw_data['unit'] = self.template_unit # XXX: extract raw dictionary from the resource object, there is no diff --git a/checkbox-ng/plainbox/impl/unit/test_template.py b/checkbox-ng/plainbox/impl/unit/test_template.py index ead808f35d..08d3dcceff 100644 --- a/checkbox-ng/plainbox/impl/unit/test_template.py +++ b/checkbox-ng/plainbox/impl/unit/test_template.py @@ -371,6 +371,32 @@ def test_instantiate_one(self): self.assertEqual(job.partial_id, 'check-device-sda1') self.assertEqual(job.summary, 'Test some device (/sys/something)') self.assertEqual(job.plugin, 'shell') + self.assertEqual(job.template_id, "check-device-dev_name") + + def test_instantiate_one_with_template_id(self): + """ + Ensure the full template-id (including namespace) is passed down to the + instantiated jobs. 
+ """ + provider = mock.Mock(spec=IProvider1) + provider.namespace = "namespace" + template = TemplateUnit({ + "template-resource": "resource", + "template-id": "origin-template", + "id": "check-device-{dev_name}", + "summary": "Test {name} ({sys_path})", + "plugin": "shell", + }, provider=provider) + job = template.instantiate_one(Resource({ + "dev_name": "sda1", + "name": "some device", + "sys_path": "/sys/something", + })) + self.assertIsInstance(job, JobDefinition) + self.assertEqual(job.partial_id, "check-device-sda1") + self.assertEqual(job.summary, "Test some device (/sys/something)") + self.assertEqual(job.plugin, "shell") + self.assertEqual(job.template_id, "namespace::origin-template") def test_instantiate_missing_parameter(self): """ From 9f0912bd67cecc69f1fa844be2a71d91ea2ffa76 Mon Sep 17 00:00:00 2001 From: LiaoU3 <58060146+LiaoU3@users.noreply.github.com> Date: Mon, 19 Feb 2024 17:17:30 +0800 Subject: [PATCH 031/108] Fix flag name for cpu/topology (Bugfix) (#995) Fix flag name also-after-suspend --- providers/base/units/cpu/jobs.pxu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/providers/base/units/cpu/jobs.pxu b/providers/base/units/cpu/jobs.pxu index 1106d68c6f..dbddbec0c9 100644 --- a/providers/base/units/cpu/jobs.pxu +++ b/providers/base/units/cpu/jobs.pxu @@ -151,7 +151,7 @@ _description: plugin: shell category_id: com.canonical.plainbox::cpu id: cpu/topology -flags: also-after-suspend- +flags: also-after-suspend estimated_duration: 1.0 requires: int(cpuinfo.count) > 1 and (cpuinfo.platform == 'i386' or cpuinfo.platform == 'x86_64') command: cpu_topology.py From 23aa9111af4b38fd2195e1bd0a8328be9a41e25a Mon Sep 17 00:00:00 2001 From: kissiel Date: Mon, 19 Feb 2024 11:03:38 +0100 Subject: [PATCH 032/108] Fix gateway ping (BugFix) (#991) * move from classmethods to static methods * add guessing the interfaces via `route` * add explicit typing import * simplify the argparse consts * simplify commandline args; always verbose * fix 
missing format arg * correct typing info * correct cable-only param handling * expand coverage * add ut coverage for ping fail cause * fix the spotty connection ut * codecov fight number 42 * codecov fight number 43 * codecov fight number 44 * wifi testing compat --- providers/base/bin/gateway_ping_test.py | 216 +++++++----------- .../base/bin/wifi_client_test_netplan.py | 2 +- providers/base/bin/wifi_nmcli_test.py | 2 +- .../base/tests/test_gateway_ping_test.py | 195 ++++++++++------ providers/base/units/ethernet/jobs.pxu | 4 +- 5 files changed, 215 insertions(+), 204 deletions(-) diff --git a/providers/base/bin/gateway_ping_test.py b/providers/base/bin/gateway_ping_test.py index 33bcc64cf6..c3991e404d 100755 --- a/providers/base/bin/gateway_ping_test.py +++ b/providers/base/bin/gateway_ping_test.py @@ -38,6 +38,7 @@ import time from contextlib import suppress +from typing import Dict class Route: @@ -103,33 +104,10 @@ def _get_default_gateway_from_proc(self): def _get_default_gateway_from_bin_route(self): """ - Get default gateway from /sbin/route -n - Called by get_default_gateway - and is only used if could not get that from /proc + Return the gateway for the interface associated with this Route object. 
""" - logging.debug( - _("Reading default gateway information from route binary") - ) - try: - routebin = subprocess.check_output( - ["/usr/bin/env", "route", "-n"], - env={"LANGUAGE": "C"}, - universal_newlines=True, - ) - except subprocess.CalledProcessError: - return None - route_line_re = re.compile( - r"^0\.0\.0\.0\s+(?P[\w.]+)(?P.+)", - flags=re.MULTILINE, - ) - route_lines = route_line_re.finditer(routebin) - for route_line in route_lines: - def_gateway = route_line.group("def_gateway") - interface = route_line.group("tail").rsplit(" ", 1)[-1] - if interface == self.interface and def_gateway: - return def_gateway - logging.error(_("Could not find default gateway by running route")) - return None + default_gws = get_default_gateways() + return default_gws.get(self.interface) def _get_ip_addr_info(self): return subprocess.check_output( @@ -192,8 +170,8 @@ def get_default_gateways(self) -> set: ) return def_gateways - @classmethod - def get_interface_from_ip(cls, ip): + @staticmethod + def get_interface_from_ip(ip): # Note: this uses -o instead of -j for xenial/bionic compatibility route_info = subprocess.check_output( ["ip", "-o", "route", "get", ip], universal_newlines=True @@ -207,8 +185,8 @@ def get_interface_from_ip(cls, ip): "Unable to determine any device used for {}".format(ip) ) - @classmethod - def get_any_interface(cls): + @staticmethod + def get_any_interface(): # Note: this uses -o instead of -j for xenial/bionic compatibility route_infos = subprocess.check_output( ["ip", "-o", "route", "show", "default", "0.0.0.0/0"], @@ -220,8 +198,8 @@ def get_any_interface(cls): return route_info_fields[4] raise ValueError("Unable to determine any valid interface") - @classmethod - def from_ip(cls, ip: str): + @staticmethod + def from_ip(ip: str): """ Build an instance of Route given an ip, if no ip is provided the best interface that can route to 0.0.0.0/0 is selected (as described by @@ -233,11 +211,11 @@ def from_ip(cls, ip: str): return 
Route(Route.get_any_interface()) -def is_reachable(ip, interface, verbose=False): +def is_reachable(ip, interface): """ Ping an ip to see if it is reachable """ - result = ping(ip, interface, 3, 10, verbose=verbose) + result = ping(ip, interface) return result["transmitted"] >= result["received"] > 0 @@ -259,7 +237,7 @@ def get_default_gateway_reachable_on(interface: str) -> str: ) -def get_any_host_reachable_on(interface: str, verbose=False) -> str: +def get_any_host_reachable_on(interface: str) -> str: """ Returns any host that it can reach from a given interface """ @@ -273,7 +251,7 @@ def get_any_host_reachable_on(interface: str, verbose=False) -> str: ) # retry a few times to get something in the arp table for i in range(10): - ping(broadcast, interface, 1, 1, broadcast=True, verbose=verbose) + ping(broadcast, interface, broadcast=True) # Get output from arp -a -n to get known IPs arp_table = subprocess.check_output( ["arp", "-a", "-n"], universal_newlines=True @@ -296,9 +274,7 @@ def get_any_host_reachable_on(interface: str, verbose=False) -> str: ) -def get_host_to_ping( - interface: str, target: str = None, verbose=False -) -> "str|None": +def get_host_to_ping(interface: str, target: str = None) -> "str|None": """ Attempts to determine a reachable host to ping on the specified network interface. First it tries to ping the provided target. If no target is @@ -331,10 +307,9 @@ def get_host_to_ping( def ping( host: str, interface: "str|None", - count: int, - deadline: int, + count: int = 2, + deadline: int = 4, broadcast=False, - verbose=False, ): """ pings an host via an interface count times within the given deadline. 
@@ -373,8 +348,8 @@ def ping( str(e), e.stdout, e.stderr ) return ping_summary - if verbose: - print(output) + + print(output) try: received = next(re.finditer(reg, output)) ping_summary = { @@ -392,8 +367,6 @@ def ping( def parse_args(argv): - default_count = 2 - default_delay = 4 parser = argparse.ArgumentParser() parser.add_argument( "host", @@ -401,68 +374,21 @@ def parse_args(argv): default=None, help=_("host to ping"), ) - parser.add_argument( - "-c", - "--count", - default=default_count, - type=int, - help=_("number of packets to send"), - ) - parser.add_argument( - "-d", - "--deadline", - default=default_delay, - type=int, - help=_("timeout in seconds"), - ) - parser.add_argument( - "-t", - "--threshold", - default=0, - type=int, - help=_("allowed packet loss percentage (default: %(default)s)"), + iface_mutex_group = parser.add_mutually_exclusive_group() + iface_mutex_group.add_argument( + "-I", + "--interface", + help=_("use specified interface to send packets"), + action="append", + dest="interfaces", + default=[None], ) - parser.add_argument( - "-v", "--verbose", action="store_true", help=_("be verbose") - ) - parser.add_argument( - "-I", "--interface", help=_("use specified interface to send packets") + iface_mutex_group.add_argument( + "--any-cable-interface", + help=_("use any cable interface to send packets"), + action="store_true", ) - args = parser.parse_args(argv) - # Ensure count and deadline make sense. Adjust them if not. - if args.deadline != default_delay and args.count != default_count: - # Ensure they're both consistent, and exit with a warning if not, - # rather than modifying what the user explicitly set. - if args.deadline <= args.count: - # FIXME: this cannot ever be translated correctly - raise SystemExit( - _( - "ERROR: not enough time for {0} pings in {1} seconds" - ).format(args.count, args.deadline) - ) - elif args.deadline != default_delay: - # Adjust count according to delay. 
- args.count = args.deadline - 1 - if args.count < 1: - args.count = 1 - if args.verbose: - # FIXME: this cannot ever be translated correctly - print( - _( - "Adjusting ping count to {0} to fit in {1}-second deadline" - ).format(args.count, args.deadline) - ) - else: - # Adjust delay according to count - args.deadline = args.count + 1 - if args.verbose: - # FIXME: this cannot ever be translated correctly - print( - _("Adjusting deadline to {0} seconds to fit {1} pings").format( - args.deadline, args.count - ) - ) - return args + return parser.parse_args(argv) def main(argv) -> int: @@ -474,17 +400,18 @@ def main(argv) -> int: args = parse_args(argv) + if args.any_cable_interface: + print(_("Looking for all cable interfaces...")) + all_ifaces = get_default_gateways().keys() + args.interfaces = list(filter(is_cable_interface, all_ifaces)) + # If given host is not pingable, override with something pingable. - host = get_host_to_ping( - interface=args.interface, verbose=args.verbose, target=args.host - ) - if args.verbose: - print(_("Checking connectivity to {0}").format(host)) + host = get_host_to_ping(interface=args.interfaces[0], target=args.host) + + print(_("Checking connectivity to {0}").format(host)) if host: - ping_summary = ping( - host, args.interface, args.count, args.deadline, args.verbose - ) + ping_summary = ping(host, args.interfaces[0]) else: ping_summary = { "received": 0, @@ -492,34 +419,53 @@ def main(argv) -> int: } if ping_summary["received"] == 0: - print(_("No Internet connection")) + print(_("FAIL: All packet loss.")) if ping_summary.get("cause"): print("Possible cause: {}".format(ping_summary["cause"])) return 1 elif ping_summary["transmitted"] != ping_summary["received"]: - print( - _("Connection established, but lost {0}% of packets").format( - ping_summary["pct_loss"] - ) - ) - if ping_summary["pct_loss"] > args.threshold: - print( - _( - "FAIL: {0}% packet loss is higher than {1}% threshold" - ).format(ping_summary["pct_loss"], 
args.threshold) - ) - return 1 - else: - print( - _("PASS: {0}% packet loss is within {1}% threshold").format( - ping_summary["pct_loss"], args.threshold - ) - ) - return 0 + print(_("FAIL: {0}% packet loss.").format(ping_summary["pct_loss"])) + return 1 else: - print(_("Connection to test host fully established")) + print(_("PASS: 0% packet loss").format(host)) return 0 +def get_default_gateways() -> Dict[str, str]: + """ + Use `route` program to find default gateways for all interfaces. + + returns a dictionary in a form of {interface_name: gateway} + """ + try: + routes = subprocess.check_output( + ["route", "-n"], universal_newlines=True + ) + except subprocess.CalledProcessError as exc: + logging.debug("Failed to run `route -n `", exc) + return {} + regex = r"^0\.0\.0\.0\s+(?P[\w.]+)\s.*\s(?P[\w.]+)$" + matches = re.finditer(regex, routes, re.MULTILINE) + + return {m.group("interface"): m.group("gw") for m in matches} + + +def is_cable_interface(interface: str) -> bool: + """ + Check if the interface is a cable interface. + This is a simple heuristic that checks if the interface is named + "enX" or "ethX" where X is a number. + + :param interface: the interface name to check + :return: True if the interface is a cable interface, False otherwise + + Looking at the `man 7 systemd.net-naming-scheme` we can see that + even the `eth` matching may be an overkill. 
+ """ + if not isinstance(interface, str) or not interface: + return False + return interface.startswith("en") or interface.startswith("eth") + + if __name__ == "__main__": sys.exit(main(sys.argv[1:])) diff --git a/providers/base/bin/wifi_client_test_netplan.py b/providers/base/bin/wifi_client_test_netplan.py index f314480454..7a4f408fbc 100755 --- a/providers/base/bin/wifi_client_test_netplan.py +++ b/providers/base/bin/wifi_client_test_netplan.py @@ -256,7 +256,7 @@ def perform_ping_test(interface): if target: count = 5 - result = ping(target, interface, count, 10, verbose=True) + result = ping(target, interface, count, 10) if result['received'] == count: return True diff --git a/providers/base/bin/wifi_nmcli_test.py b/providers/base/bin/wifi_nmcli_test.py index 91000bbafe..6f75bdfd43 100755 --- a/providers/base/bin/wifi_nmcli_test.py +++ b/providers/base/bin/wifi_nmcli_test.py @@ -119,7 +119,7 @@ def perform_ping_test(interface): if target: count = 5 - result = ping(target, interface, count, 10, verbose=True) + result = ping(target, interface, count, 10) if result['received'] == count: return True diff --git a/providers/base/tests/test_gateway_ping_test.py b/providers/base/tests/test_gateway_ping_test.py index ecebc6a2b9..690b7cf93f 100644 --- a/providers/base/tests/test_gateway_ping_test.py +++ b/providers/base/tests/test_gateway_ping_test.py @@ -11,6 +11,8 @@ get_default_gateway_reachable_on, get_any_host_reachable_on, get_host_to_ping, + get_default_gateways, + is_cable_interface, ) @@ -46,9 +48,7 @@ def test__get_default_gateway_from_ip_invalid_route( self.assertIsNone(Route._get_default_gateway_from_ip(self_mock)) @patch("subprocess.check_output") - def test__get_default_gateway_from_ip_crash( - self, mock_check_output - ): + def test__get_default_gateway_from_ip_crash(self, mock_check_output): mock_check_output.side_effect = subprocess.CalledProcessError(1, "") self_mock = MagicMock() self_mock.interface = "eth0" @@ -92,35 +92,33 @@ def 
_num_to_dotted_quad(x): Route._get_default_gateway_from_proc(self_mock), ) - @patch("subprocess.check_output") - def test__get_default_gateway_from_bin_route_nominal( - self, mock_check_output - ): - mock_check_output.return_value = textwrap.dedent( - """ - Kernel IP routing table - Destination Gateway Genmask Flags Metric Ref Use Iface - 0.0.0.0 192.168.1.1 0.0.0.0 UG 100 0 0 enp5s0 - 0.0.0.0 192.168.1.100 0.0.0.0 UG 600 0 0 wlan0 - """ - ) + @patch("gateway_ping_test.get_default_gateways") + def test__get_default_gateway_from_bin_route_nominal(self, mock_get_d_gws): + mock_get_d_gws.return_value = { + "enp5s0": "192.168.1.1", + "wlan0": "192.168.1.100", + } self_mock = MagicMock() self_mock.interface = "wlan0" gateway = Route._get_default_gateway_from_bin_route(self_mock) self.assertEqual(gateway, "192.168.1.100") - @patch("subprocess.check_output") + @patch("gateway_ping_test.get_default_gateways") def test__get_default_gateway_from_bin_route_if_not_found( - self, mock_check_output + self, mock_get_d_gws ): - mock_check_output.return_value = textwrap.dedent( - """ - Kernel IP routing table - Destination Gateway Genmask Flags Metric Ref Use Iface - 0.0.0.0 192.168.1.1 0.0.0.0 UG 100 0 0 enp5s0 - 0.0.0.0 192.168.1.100 0.0.0.0 UG 600 0 0 wlan0 - """ - ) + mock_get_d_gws.return_value = { + "enp5s0": "192.168.1.1", + "wlan0": "192.168.1.100", + } + self_mock = MagicMock() + self_mock.interface = "enp1s0" + gateway = Route._get_default_gateway_from_bin_route(self_mock) + self.assertIsNone(gateway) + + @patch("gateway_ping_test.get_default_gateways") + def test__get_default_gateway_from_bin_route_empty(self, mock_get_d_gws): + mock_get_d_gws.return_value = {} self_mock = MagicMock() self_mock.interface = "enp1s0" gateway = Route._get_default_gateway_from_bin_route(self_mock) @@ -284,6 +282,14 @@ def test_from_ip_none(self, mock_get_interface_from_ip): self.assertEqual(Route.from_ip(None).interface, "enp6s0") self.assertTrue(mock_get_interface_from_ip.called) + 
@patch("subprocess.check_output", return_value="") + def test_get_ip_addr_info(self, mock_check_output): + self_mock = MagicMock() + self.assertEqual(Route._get_ip_addr_info(self_mock), "") + mock_check_output.assert_called_once_with( + ["ip", "-o", "addr", "show"], universal_newlines=True + ) + class TestUtilityFunctions(unittest.TestCase): @patch("gateway_ping_test.ping") @@ -455,7 +461,7 @@ def test_ping_ok(self, mock_check_output): mock_check_output.return_value = ( "4 packets transmitted, 4 received, 0% packet loss" ) - result = ping("8.8.8.8", "eth0", 4, 5, verbose=True) + result = ping("8.8.8.8", "eth0") self.assertEqual(result["transmitted"], 4) self.assertEqual(result["received"], 4) self.assertEqual(result["pct_loss"], 0) @@ -463,13 +469,13 @@ def test_ping_ok(self, mock_check_output): @patch("subprocess.check_output") def test_ping_malformed_output(self, mock_check_output): mock_check_output.return_value = "Malformed output" - result = ping("8.8.8.8", "eth0", 4, 5, verbose=True) + result = ping("8.8.8.8", "eth0") self.assertIn("Failed to parse", result["cause"]) @patch("subprocess.check_output") def test_ping_no_ping(self, mock_check_output): mock_check_output.side_effect = FileNotFoundError("ping not found") - result = ping("8.8.8.8", "eth0", 4, 5, verbose=True) + result = ping("8.8.8.8", "eth0") self.assertEqual(result["cause"], str(mock_check_output.side_effect)) @patch("subprocess.check_output") @@ -479,7 +485,7 @@ def test_ping_failure(self, mock_check_output): 1, "ping", "ping: unknown host" ) ) - result = ping("invalid.host", None, 4, 5) + result = ping("invalid.host", None) # Since the function does not return a detailed error for general # failures, we just check for non-success self.assertNotEqual( @@ -494,7 +500,7 @@ def test_ping_failure_broadcast(self, mock_check_output): 1, "ping", stderr="SO_BINDTODEVICE: Operation not permitted" ) ) - result = ping("255.255.255.255", None, 4, 5, broadcast=True) + result = ping("255.255.255.255", None, 
broadcast=True) self.assertIsNone(result) @@ -503,9 +509,19 @@ class TestMainFunction(unittest.TestCase): @patch("gateway_ping_test.ping") def test_no_internet_connection_no_cause( self, mock_ping, mock_get_host_to_ping + ): + mock_get_host_to_ping.return_value = "1.1.1.1" + mock_ping.return_value = {"received": 0} + result = main(["1.1.1.1"]) + self.assertEqual(result, 1) + + @patch("gateway_ping_test.get_host_to_ping") + @patch("gateway_ping_test.ping") + def test_no_internet_connection_auto_cause( + self, mock_ping, mock_get_host_to_ping ): mock_get_host_to_ping.return_value = None - mock_ping.return_value = None + mock_ping.return_value = {"received": 0} result = main(["1.1.1.1"]) self.assertEqual(result, 1) @@ -520,28 +536,27 @@ def test_no_internet_connection_cause( @patch("gateway_ping_test.get_host_to_ping") @patch("gateway_ping_test.ping") - def test_packet_loss_within_threshold( + def test_spotty_connection_with_cause( self, mock_ping, mock_get_host_to_ping ): mock_ping.return_value = { - "transmitted": 100, - "received": 95, - "pct_loss": 5, + "received": 1, + "transmitted": 2, + "pct_loss": 50, + "cause": "Test cause", } - result = main(["1.1.1.1", "-t", "10"]) - self.assertEqual(result, 0) + result = main(["1.1.1.1"]) + self.assertEqual(result, 1) @patch("gateway_ping_test.get_host_to_ping") @patch("gateway_ping_test.ping") - def test_packet_loss_exceeding_threshold( - self, mock_ping, mock_get_host_to_ping - ): + def test_some_packet_loss(self, mock_ping, mock_get_host_to_ping): mock_ping.return_value = { "transmitted": 100, - "received": 80, - "pct_loss": 20, + "received": 95, + "pct_loss": 5, } - result = main(["1.1.1.1", "-t", "10"]) + result = main(["1.1.1.1"]) self.assertEqual(result, 1) @patch("gateway_ping_test.get_host_to_ping") @@ -555,30 +570,80 @@ def test_full_connectivity(self, mock_ping, mock_get_host_to_ping): result = main(["1.1.1.1"]) self.assertEqual(result, 0) - @patch("gateway_ping_test.get_host_to_ping") + 
@patch("gateway_ping_test.is_reachable", return_value=True) + @patch("gateway_ping_test.get_default_gateways") @patch("gateway_ping_test.ping") - def test_verbose_output(self, mock_ping, mock_get_host_to_ping): - mock_ping.return_value = { - "transmitted": 100, - "received": 100, - "pct_loss": 0, + def test_main_any_cable(self, mock_ping, mock_get_default_gateways, _): + mock_get_default_gateways.return_value = { + "enp5s0": "192.168.1.1", + "wlan0": "192.168.1.2", } - result = main(["1.1.1.1", "-v"]) - self.assertEqual(result, 0) + main(["--any-cable-interface"]) + mock_ping.assert_called_once_with("192.168.1.1", "enp5s0") - @patch("gateway_ping_test.get_host_to_ping") - @patch("gateway_ping_test.ping") - def test_invalid_arguments_count_deadline( - self, mock_ping, mock_get_host_to_ping - ): - with self.assertRaises(SystemExit): - main(["-c", "10", "-d", "8"]) - def test_adjust_count_based_on_non_default_deadline(self): - # Assuming default_delay is 4 - args = parse_args(["-d", "1", "-v"]) - self.assertEqual( - args.count, - 1, - "Count should be adjusted based on the non-default deadline", +class GetDefaultGatewaysTests(unittest.TestCase): + @patch("subprocess.check_output") + def test_get_default_gateways_nominal(self, mock_check_output): + mock_check_output.return_value = textwrap.dedent( + """ + Kernel IP routing table + Destination Gateway Genmask Flags Metric Ref Use Iface + 0.0.0.0 192.168.1.1 0.0.0.0 UG 100 0 0 enp5s0 + 0.0.0.0 192.168.1.100 0.0.0.0 UG 600 0 0 wlan0 + """ ) + gateways = get_default_gateways() + self.assertDictEqual( + gateways, {"enp5s0": "192.168.1.1", "wlan0": "192.168.1.100"} + ) + + @patch("subprocess.check_output") + def test_get_default_gateways_no_default(self, mock_check_output): + mock_check_output.return_value = textwrap.dedent( + """ + Kernel IP routing table + Destination Gateway Genmask Flags Metric Ref Use Iface + 192.168.1.0 192.168.1.1 0.0.0.0 UG 100 0 0 enp5s0 + """ + ) + gateways = get_default_gateways() + 
self.assertDictEqual(gateways, {}) + + @patch("subprocess.check_output") + def test_get_default_gateways_cant_run_route(self, mock_check_output): + mock_check_output.side_effect = subprocess.CalledProcessError(1, "") + gateways = get_default_gateways() + self.assertDictEqual(gateways, {}) + + +class IsCableInterfaceTests(unittest.TestCase): + def test_is_cable_interface_nominal(self): + self.assertTrue(is_cable_interface("eth0")) + + def test_is_cable_interface_nominal_2(self): + self.assertTrue(is_cable_interface("enp5s0")) + + def test_is_cable_interface_nominal_3(self): + self.assertTrue(is_cable_interface("enp0s25")) + + def test_is_cable_interface_nope(self): + self.assertFalse(is_cable_interface("wlan0")) + + def test_is_cable_interface_nope_2(self): + self.assertFalse(is_cable_interface("wlp3s0")) + + def test_is_cable_interface_nope_3(self): + self.assertFalse(is_cable_interface("wwan0")) + + def test_is_cable_interface_nope_4(self): + self.assertFalse(is_cable_interface("tun0")) + + def test_is_cable_interface_nope_5(self): + self.assertFalse(is_cable_interface("lo")) + + def test_is_cable_interface_empty(self): + self.assertFalse(is_cable_interface("")) + + def test_is_cable_interface_not_string(self): + self.assertFalse(is_cable_interface(123)) diff --git a/providers/base/units/ethernet/jobs.pxu b/providers/base/units/ethernet/jobs.pxu index eaa5e29688..16ec79b46e 100644 --- a/providers/base/units/ethernet/jobs.pxu +++ b/providers/base/units/ethernet/jobs.pxu @@ -206,7 +206,7 @@ _summary: Can ping another machine over Ethernet port {interface} _description: Check Ethernet works by pinging another machine plugin: shell command: - gateway_ping_test.py -v --interface {interface} + gateway_ping_test.py --interface {interface} category_id: com.canonical.plainbox::ethernet estimated_duration: 4.0 flags: preserve-locale also-after-suspend @@ -299,7 +299,7 @@ _steps: 2. Follow the instructions on the screen. 
plugin: user-interact command: - eth_hotplugging.py {{ interface }} && gateway_ping_test.py -v --interface {{ interface }} + eth_hotplugging.py {{ interface }} && gateway_ping_test.py --interface {{ interface }} category_id: com.canonical.plainbox::ethernet estimated_duration: 60.0 user: root From abaa974a181f762cfc9f4c091cc36e34ac55c26a Mon Sep 17 00:00:00 2001 From: Fernando Bravo <39527354+fernando79513@users.noreply.github.com> Date: Mon, 19 Feb 2024 12:53:01 +0100 Subject: [PATCH 033/108] Removed unused screenshot tests (BugFix) (#997) Removed unused screenshot tests `camera_test.py` was called with a device that does not exist and outputs a non-informative error. --- providers/base/units/graphics/jobs.pxu | 59 ------------------- providers/base/units/graphics/legacy.pxu | 52 ---------------- providers/base/units/suspend/suspend.pxu | 22 ------- .../units/client-cert-desktop-hwe-sru.pxu | 2 - providers/sru/units/sru-ubuntucore.pxu | 31 ---------- providers/sru/units/sru.pxu | 27 --------- 6 files changed, 193 deletions(-) diff --git a/providers/base/units/graphics/jobs.pxu b/providers/base/units/graphics/jobs.pxu index d48f2b5a20..827ebba277 100644 --- a/providers/base/units/graphics/jobs.pxu +++ b/providers/base/units/graphics/jobs.pxu @@ -382,56 +382,6 @@ estimated_duration: 50.000 _description: Move a 3D window around the screen on the {vendor} {product} video card _summary: Test 3D window movement for {vendor} {product} -unit: template -template-resource: graphics_card -plugin: shell -category_id: com.canonical.plainbox::graphics -id: graphics/{index}_screenshot_{product_slug} -requires: package.name == 'fswebcam' -command: set -o pipefail; camera_test.py still --device=/dev/external_webcam -f "$PLAINBOX_SESSION_SHARE"/screenshot_{index}.jpg -q 2>&1 | ansi_parser.py -_summary: Test grabbing a screenshot for {vendor} {product} -_description: - PURPOSE: - Take a screengrab of the current screen (logged on Unity desktop) as produced by the {vendor} {product} 
graphics card - STEPS: - 1. Take picture using USB webcam - VERIFICATION: - Review attachment manually later - -unit: template -template-resource: graphics_card -plugin: attachment -category_id: com.canonical.plainbox::graphics -id: graphics/{index}_screenshot_{product_slug}.jpg -depends: graphics/{index}_screenshot_{product_slug} -command: cat "$PLAINBOX_SESSION_SHARE"/screenshot_{index}.jpg -_description: Attaches the screenshot captured in graphics/screenshot for the {vendor} {product} graphics card. -_summary: Attach results of screenshot test for {vendor} {product} - -unit: template -template-resource: graphics_card -plugin: shell -category_id: com.canonical.plainbox::graphics -id: graphics/{index}_screenshot_fullscreen_video_{product_slug} -requires: package.name == 'fswebcam' -command: - [ -f "$PLAINBOX_PROVIDER_DATA"/video/Ogg_Theora_Video.ogv ] || {{ echo "Video file not found"; exit 1; }} - gsettings set org.gnome.totem repeat true - totem --fullscreen "$PLAINBOX_PROVIDER_DATA"/video/Ogg_Theora_Video.ogv 2>/dev/null & - set -o pipefail - sleep 15 && camera_test.py still --device=/dev/external_webcam -f "$PLAINBOX_SESSION_SHARE"/screenshot_fullscreen_video_{index}.jpg -q 2>&1 | ansi_parser.py - sleep 5 && pkill totem - gsettings set org.gnome.totem repeat false -_summary: Test FSV screenshot for {vendor} {product} -_description: - PURPOSE: - Take a screengrab of the current screen during fullscreen video playback using the {vendor} {product} graphics card - STEPS: - 1. Start a fullscreen video playback - 2. Take picture using USB webcam after a few seconds - VERIFICATION: - Review attachment manually later - unit: template template-resource: graphics_card plugin: manual @@ -475,15 +425,6 @@ _steps: _verification: Is the system using the {vendor} {product} card now? 
-unit: template -template-resource: graphics_card -plugin: attachment -category_id: com.canonical.plainbox::graphics -id: graphics/{index}_screenshot_fullscreen_video_{product_slug}.jpg -depends: graphics/{index}_screenshot_fullscreen_video_{product_slug} -command: cat "$PLAINBOX_SESSION_SHARE"/screenshot_fullscreen_video_{index}.jpg -_summary: Attach results of FSV screenshot test for {vendor} {product} -_description: Attaches the screenshot captured in graphics/screenshot_fullscreen_video for the {vendor} {product} graphics card plugin: shell category_id: com.canonical.plainbox::graphics diff --git a/providers/base/units/graphics/legacy.pxu b/providers/base/units/graphics/legacy.pxu index 2b9545d20e..48011facfe 100644 --- a/providers/base/units/graphics/legacy.pxu +++ b/providers/base/units/graphics/legacy.pxu @@ -210,55 +210,3 @@ command: window_test.py -t move estimated_duration: 50.000 _description: Move a 3D window around the screen _summary: Test 3D window movement - -plugin: shell -category_id: com.canonical.plainbox::graphics -id: graphics/screenshot -requires: package.name == 'fswebcam' -command: set -o pipefail; camera_test.py still --device=/dev/external_webcam -f "${PLAINBOX_SESSION_SHARE}"/screenshot.jpg -q 2>&1 | ansi_parser.py -_summary: Test grabbing a screenshot -_description: - PURPOSE: - Take a screengrab of the current screen (logged on Unity desktop) - STEPS: - 1. Take picture using USB webcam - VERIFICATION: - Review attachment manually later - -plugin: attachment -category_id: com.canonical.plainbox::graphics -id: screenshot.jpg -depends: graphics/screenshot -command: cat "${PLAINBOX_SESSION_SHARE}"/screenshot.jpg -_description: Attaches the screenshot captured in graphics/screenshot. 
-_summary: Attach results of screenshot test - -plugin: shell -category_id: com.canonical.plainbox::graphics -id: graphics/screenshot_fullscreen_video -requires: package.name == 'fswebcam' -command: - [ -f "${PLAINBOX_PROVIDER_DATA}"/video/Ogg_Theora_Video.ogv ] || { echo "Video file not found"; exit 1; } - gsettings set org.gnome.totem repeat true - totem --fullscreen "${PLAINBOX_PROVIDER_DATA}"/video/Ogg_Theora_Video.ogv 2>/dev/null & - set -o pipefail - sleep 15 && camera_test.py still --device=/dev/external_webcam -f "${PLAINBOX_SESSION_SHARE}"/screenshot_fullscreen_video.jpg -q 2>&1 | ansi_parser.py - sleep 5 && totem --quit 2>/dev/null - gsettings set org.gnome.totem repeat false -_summary: Test FSV screenshot -_description: - PURPOSE: - Take a screengrab of the current screen during fullscreen video playback - STEPS: - 1. Start a fullscreen video playback - 2. Take picture using USB webcam after a few seconds - VERIFICATION: - Review attachment manually later - -plugin: attachment -category_id: com.canonical.plainbox::graphics -id: screenshot_fullscreen_video.jpg -depends: graphics/screenshot_fullscreen_video -command: cat "${PLAINBOX_SESSION_SHARE}"/screenshot_fullscreen_video.jpg -_description: Attaches the screenshot captured in graphics/screenshot_fullscreen_video. 
-_summary: Attach results of FSV screenshot test diff --git a/providers/base/units/suspend/suspend.pxu b/providers/base/units/suspend/suspend.pxu index 83b8642b2f..9ca63be8a4 100644 --- a/providers/base/units/suspend/suspend.pxu +++ b/providers/base/units/suspend/suspend.pxu @@ -1220,28 +1220,6 @@ command: [ -e "$PLAINBOX_SESSION_SHARE"/suspend_auto_single_log ] && cat "$PLAIN _description: Attaches the log from the single suspend/resume test to the results -plugin: shell -category_id: com.canonical.plainbox::suspend -id: suspend/screenshot_after_suspend -estimated_duration: 10.0 -depends: suspend/suspend_advanced_auto -requires: package.name == 'fswebcam' -command: set -o pipefail; camera_test.py still --device=/dev/external_webcam -f "${PLAINBOX_SESSION_SHARE}"/screenshot_after_suspend.jpg -q 2>&1 | ansi_parser.py -_description: - PURPOSE: - Take a screengrab of the current screen after suspend (logged on Unity desktop) - STEPS: - 1. Take picture using USB webcam - VERIFICATION: - Review attachment manually later - -plugin: attachment -category_id: com.canonical.plainbox::suspend -id: screenshot_after_suspend.jpg -depends: suspend/screenshot_after_suspend -command: cat "${PLAINBOX_SESSION_SHARE}"/screenshot_after_suspend.jpg -_description: Attaches the screenshot captured in graphics/screenshot. 
- plugin: shell category_id: com.canonical.plainbox::suspend id: suspend/gpu_lockup_after_suspend diff --git a/providers/certification-client/units/client-cert-desktop-hwe-sru.pxu b/providers/certification-client/units/client-cert-desktop-hwe-sru.pxu index 7fded7b655..860dcb9ffb 100644 --- a/providers/certification-client/units/client-cert-desktop-hwe-sru.pxu +++ b/providers/certification-client/units/client-cert-desktop-hwe-sru.pxu @@ -52,8 +52,6 @@ include: graphics/1_cycle_resolution_.* graphics/1_screen-capture-internal_.* graphics/1_screen-capture-internal_.*png - graphics/1_screenshot_.* - 1_screenshot_.* graphics/screen-capture-internal screen-capture-internal.png graphics/2_switch_card_.* certification-status=blocker diff --git a/providers/sru/units/sru-ubuntucore.pxu b/providers/sru/units/sru-ubuntucore.pxu index 2029b1e89d..3e4207d227 100644 --- a/providers/sru/units/sru-ubuntucore.pxu +++ b/providers/sru/units/sru-ubuntucore.pxu @@ -1,26 +1,3 @@ -# plugin: shell -# category_id: com.canonical.plainbox::graphics -# id: graphics/screenshot_opencv_validation -# requires: package.name == 'python-opencv' -# environ: EXTERNAL_WEBCAM_DEVICE -# command: -# screenshot_validation \ -# ${PLAINBOX_PROVIDER_DATA}/images/logo_Ubuntu_stacked_black.png \ -# --device=${EXTERNAL_WEBCAM_DEVICE:-/dev/external_webcam} \ -# -o ${PLAINBOX_SESSION_SHARE}/screenshot_opencv_validation.jpg -# _summary: Test that a screenshot is valid, automatically -# _description: -# Take a screengrab of the screen displaying a black and white Ubuntu logo. -# Check that the screenshot matches the original file using OpenCV ORB detection. 
- -# plugin: attachment -# category_id: com.canonical.plainbox::graphics -# id: screenshot_opencv_validation.jpg -# after: graphics/screenshot_opencv_validation -# command: [ -f ${PLAINBOX_SESSION_SHARE}/screenshot_opencv_validation.jpg ] && cat ${PLAINBOX_SESSION_SHARE}/screenshot_opencv_validation.jpg -# _description: Attaches the screenshot captured in graphics/screenshot_opencv_validation. -# _summary: Attach the screenshot captured for the automatically validated screenshot test - id: sru-ubuntucore _name: All SRU Tests (Ubuntu Core) unit: test plan @@ -56,12 +33,6 @@ include: # graphics/1_compiz_check_.* # graphics/xorg-failsafe # graphics/xorg-process -# graphics/1_screenshot_.* -# 1_screenshot_.*.jpg -# graphics/1_screenshot_fullscreen_video_.* -# 1_screenshot_fullscreen_video_.*.jpg -# graphics/screenshot_opencv_validation -# screenshot_opencv_validation.jpg # install/apt-get-gets-updates mediacard/sd-preinserted memory/info @@ -105,8 +76,6 @@ include: after-suspend-wireless/wireless_connection_wpa_ac_nm_.* after-suspend-wireless/wireless_connection_open_ac_nm_.* # suspend/gpu_lockup_after_suspend -# suspend/screenshot_after_suspend -# screenshot_after_suspend.jpg bootstrap_include: device graphics_card diff --git a/providers/sru/units/sru.pxu b/providers/sru/units/sru.pxu index 07fbc81e2b..6b2bc35b73 100644 --- a/providers/sru/units/sru.pxu +++ b/providers/sru/units/sru.pxu @@ -14,29 +14,6 @@ _description: Secondly, scan APT system update log to see any build error messages. This test is mainly used for pre-installed images which use DKMS modules. 
-plugin: shell -category_id: com.canonical.plainbox::graphics -id: graphics/screenshot_opencv_validation -requires: package.name == 'python-opencv' -environ: EXTERNAL_WEBCAM_DEVICE -command: - screenshot_validation \ - "${PLAINBOX_PROVIDER_DATA}"/images/logo_Ubuntu_stacked_black.png \ - --device="${EXTERNAL_WEBCAM_DEVICE:-/dev/external_webcam}" \ - -o "${PLAINBOX_SESSION_SHARE}"/screenshot_opencv_validation.jpg -_summary: Test that a screenshot is valid, automatically -_description: - Take a screengrab of the screen displaying a black and white Ubuntu logo. - Check that the screenshot matches the original file using OpenCV ORB detection. - -plugin: attachment -category_id: com.canonical.plainbox::graphics -id: screenshot_opencv_validation.jpg -after: graphics/screenshot_opencv_validation -command: [ -f "${PLAINBOX_SESSION_SHARE}"/screenshot_opencv_validation.jpg ] && cat "${PLAINBOX_SESSION_SHARE}"/screenshot_opencv_validation.jpg -_description: Attaches the screenshot captured in graphics/screenshot_opencv_validation. 
-_summary: Attach the screenshot captured for the automatically validated screenshot test - id: sru-server _name: All SRU Tests (Ubuntu Server) unit: test plan @@ -135,10 +112,6 @@ include: graphics/1_compiz_check_.* graphics/xorg-failsafe graphics/xorg-process - graphics/1_screenshot_.* - 1_screenshot_.*.jpg - graphics/1_screenshot_fullscreen_video_.* - 1_screenshot_fullscreen_video_.*.jpg install/apt-get-gets-updates miscellanea/dkms_build_validation networking/http From 26916be7aa4bd25db3b7acc192fe6c081634c682 Mon Sep 17 00:00:00 2001 From: Fernando Bravo <39527354+fernando79513@users.noreply.github.com> Date: Mon, 19 Feb 2024 14:18:02 +0100 Subject: [PATCH 034/108] Changing opencv version in tox (Infra) (#999) Changing opencv version in tox --- providers/base/tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/providers/base/tox.ini b/providers/base/tox.ini index b974b67f72..3484b56759 100644 --- a/providers/base/tox.ini +++ b/providers/base/tox.ini @@ -47,7 +47,7 @@ deps = libsvm == 3.23.0.4 MarkupSafe == 1.1.0 natsort == 4.0.3 - opencv_python == 4.8.1.78 + opencv_python == 4.4.0.42 requests == 2.18.4 tqdm == 4.19.5 urwid == 2.0.1 From 07dd3de00056f0c963818002d7c58c07c0b22186 Mon Sep 17 00:00:00 2001 From: kissiel Date: Mon, 19 Feb 2024 15:33:23 +0100 Subject: [PATCH 035/108] Enable multi interface testing in `gateway_ping_test.py` (New) (#998) * enable multi-interface pinging on gateway-ping test script * add any-cable-interface ping job --- providers/base/bin/gateway_ping_test.py | 66 ++++++++++++------- .../base/tests/test_gateway_ping_test.py | 28 +++++++- providers/base/units/ethernet/jobs.pxu | 16 ++++- providers/sru/units/sru-ubuntucore.pxu | 2 +- 4 files changed, 83 insertions(+), 29 deletions(-) diff --git a/providers/base/bin/gateway_ping_test.py b/providers/base/bin/gateway_ping_test.py index c3991e404d..97c0ef35ee 100755 --- a/providers/base/bin/gateway_ping_test.py +++ b/providers/base/bin/gateway_ping_test.py @@ -38,7 +38,7 
@@ import time from contextlib import suppress -from typing import Dict +from typing import Dict, List class Route: @@ -366,6 +366,36 @@ def ping( return ping_summary +def perform_ping_test(interfaces: List[str], target=None) -> None: + """ + Perform a ping test on the specified interfaces. + If any of the provided interfaces successfully pinged the target host, + the function returns 0. Otherwise, it returns 1. + """ + for iface in interfaces: + host = get_host_to_ping(iface, target) + if not host: + print( + "Failed to find a host to ping on interface {}".format(iface) + ) + continue + print( + "Pinging {} with {} interface".format( + host, iface or "*unspecified*" + ) + ) + ping_summary = ping(host, iface) + if ping_summary["received"] != ping_summary["transmitted"]: + print("FAIL: {0}% packet loss.".format(ping_summary["pct_loss"])) + continue + if ping_summary["transmitted"] > 0: + print(_("PASS: 0% packet loss").format(host)) + return 0 + else: + print("FAIL: Unable to ping any host with the above interfaces") + return 1 + + def parse_args(argv): parser = argparse.ArgumentParser() parser.add_argument( @@ -381,7 +411,6 @@ def parse_args(argv): help=_("use specified interface to send packets"), action="append", dest="interfaces", - default=[None], ) iface_mutex_group.add_argument( "--any-cable-interface", @@ -404,31 +433,18 @@ def main(argv) -> int: print(_("Looking for all cable interfaces...")) all_ifaces = get_default_gateways().keys() args.interfaces = list(filter(is_cable_interface, all_ifaces)) + if not args.interfaces: + raise SystemExit( + "FAIL: Couldn't find any suitable cable interface." + ) - # If given host is not pingable, override with something pingable. 
- host = get_host_to_ping(interface=args.interfaces[0], target=args.host) - - print(_("Checking connectivity to {0}").format(host)) + # If no interfaces were specified, use None to let the function + # determine the interface to use (this is to make it compliant with + # the original implementation) + if not args.interfaces: + args.interfaces = [None] - if host: - ping_summary = ping(host, args.interfaces[0]) - else: - ping_summary = { - "received": 0, - "cause": "Unable to find any host to ping", - } - - if ping_summary["received"] == 0: - print(_("FAIL: All packet loss.")) - if ping_summary.get("cause"): - print("Possible cause: {}".format(ping_summary["cause"])) - return 1 - elif ping_summary["transmitted"] != ping_summary["received"]: - print(_("FAIL: {0}% packet loss.").format(ping_summary["pct_loss"])) - return 1 - else: - print(_("PASS: 0% packet loss").format(host)) - return 0 + return perform_ping_test(args.interfaces, args.host) def get_default_gateways() -> Dict[str, str]: diff --git a/providers/base/tests/test_gateway_ping_test.py b/providers/base/tests/test_gateway_ping_test.py index 690b7cf93f..a67904c718 100644 --- a/providers/base/tests/test_gateway_ping_test.py +++ b/providers/base/tests/test_gateway_ping_test.py @@ -511,7 +511,10 @@ def test_no_internet_connection_no_cause( self, mock_ping, mock_get_host_to_ping ): mock_get_host_to_ping.return_value = "1.1.1.1" - mock_ping.return_value = {"received": 0} + mock_ping.return_value = { + "received": 0, + "transmitted": 0, + } result = main(["1.1.1.1"]) self.assertEqual(result, 1) @@ -530,7 +533,11 @@ def test_no_internet_connection_auto_cause( def test_no_internet_connection_cause( self, mock_ping, mock_get_host_to_ping ): - mock_ping.return_value = {"received": 0, "cause": "Test cause"} + mock_ping.return_value = { + "received": 0, + "transmitted": 0, + "cause": "Test cause", + } result = main(["1.1.1.1"]) self.assertEqual(result, 1) @@ -578,9 +585,26 @@ def test_main_any_cable(self, mock_ping, 
mock_get_default_gateways, _): "enp5s0": "192.168.1.1", "wlan0": "192.168.1.2", } + mock_ping.return_value = { + "transmitted": 100, + "received": 100, + "pct_loss": 0, + } main(["--any-cable-interface"]) mock_ping.assert_called_once_with("192.168.1.1", "enp5s0") + @patch("gateway_ping_test.is_reachable", return_value=True) + @patch("gateway_ping_test.get_default_gateways") + @patch("gateway_ping_test.ping") + def test_main_any_cable_no_iface( + self, mock_ping, mock_get_default_gateways, _ + ): + mock_get_default_gateways.return_value = { + "wlan0": "192.168.1.2", + } + with self.assertRaises(SystemExit): + main(["--any-cable-interface"]) + class GetDefaultGatewaysTests(unittest.TestCase): @patch("subprocess.check_output") diff --git a/providers/base/units/ethernet/jobs.pxu b/providers/base/units/ethernet/jobs.pxu index 16ec79b46e..ec15fbc5ec 100644 --- a/providers/base/units/ethernet/jobs.pxu +++ b/providers/base/units/ethernet/jobs.pxu @@ -212,6 +212,20 @@ estimated_duration: 4.0 flags: preserve-locale also-after-suspend user: root +id: ethernet/ping-with-any-cable-interface +_summary: Can ping the gateway with any cable Ethernet interface +_description: Check any of the cable Ethernet ports available on the system + can ping its default gateway. +category_id: com.canonical.plainbox::ethernet +estimated_duration: 4.0 +flags: also-after-suspend +user: root +plugin: shell +command: + gateway_ping_test.py --any-cable-interface +requires: + device.category == 'NETWORK' + unit: template template-resource: device template-filter: device.category == 'NETWORK' and device.mac != 'UNKNOWN' @@ -399,4 +413,4 @@ command: network_reconnect_resume_test.py -t 10 -d wired _summary: Network reconnect resume test (wired) _description: Checks the length of time it takes to reconnect an existing wired connection - after a suspend/resume cycle. \ No newline at end of file + after a suspend/resume cycle. 
diff --git a/providers/sru/units/sru-ubuntucore.pxu b/providers/sru/units/sru-ubuntucore.pxu index 3e4207d227..705bd542e5 100644 --- a/providers/sru/units/sru-ubuntucore.pxu +++ b/providers/sru/units/sru-ubuntucore.pxu @@ -39,7 +39,7 @@ include: ethernet/detect ethernet/info_automated networking/http - ethernet/ping_.* + ethernet/ping-with-any-cable-interface power-management/tickless_idle_.* power-management/rtc power-management/fwts_wakealarm From 659d2ee5513c0fce3d16bdda25318a226772428f Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Mon, 19 Feb 2024 16:44:33 +0100 Subject: [PATCH 036/108] Add keys to fast interact with resume menu action (new) (#1000) Add keys to fast interact with resume menu action --- checkbox-ng/checkbox_ng/resume_menu.py | 35 +++++++++++++++++--------- 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/checkbox-ng/checkbox_ng/resume_menu.py b/checkbox-ng/checkbox_ng/resume_menu.py index c5b4970669..cbc15b5473 100644 --- a/checkbox-ng/checkbox_ng/resume_menu.py +++ b/checkbox-ng/checkbox_ng/resume_menu.py @@ -184,11 +184,11 @@ def _create_action_view(self): """Create a view for the action selection menu.""" self._action_buttons = [ # label that goes on the button and the action string recorded - ("Add comment", "comment"), - ("Resume and skip the job", "skip"), - ("Mark the job as passed and continue", "pass"), - ("Mark the job as failed and continue", "fail"), - ("Resume and run the job again.", "rerun"), + ("Add comment (C)", "comment"), + ("Resume and skip the job (S)", "skip"), + ("Mark the job as passed and continue (P)", "pass"), + ("Mark the job as failed and continue (F)", "fail"), + ("Resume and run the job again. 
(R)", "rerun"), ] action_listbox_content = self._ACTION_MENU_STATIC_ELEMENTS + [ urwid.Button(btn[0]) for btn in self._action_buttons @@ -257,7 +257,7 @@ def _handle_input_on_session_menu(self, key): self.chosen_session = None raise urwid.ExitMainLoop() - elif key in ["d", "D"]: + elif key.upper() == "D": # user chose to delete the session self.chosen_session = self._entries[self.focused_index][0] self._chosen_action = "delete" @@ -269,21 +269,32 @@ def _handle_input_on_action_menu(self, key): # user cancelled the action, go back to the session selection self.chosen_session = None self.loop.widget = self._body - + return elif key == "enter": # user chose an action, let's record it and exit # if the action is "comment" we need to show the comment box - action_index = self._action_menu.base_widget.focus_position - len( self._ACTION_MENU_STATIC_ELEMENTS ) action = self._action_buttons[action_index][1] self._chosen_action = action - if action == "comment": - self.loop.widget = self._comment_view - else: - raise urwid.ExitMainLoop() + elif key.upper() == "C": + self._chosen_action = "comment" + self.loop.widget = self._comment_view + elif key.upper() == "S": + self._chosen_action = "skip" + elif key.upper() == "P": + self._chosen_action = "pass" + elif key.upper() == "F": + self._chosen_action = "fail" + elif key.upper() == "R": + self._chosen_action = "rerun" + + if self._chosen_action == "comment": + self.loop.widget = self._comment_view + else: + raise urwid.ExitMainLoop() def _handle_input_on_comment_box(self, key): """Handle input on the comment box.""" From c7e13c3258f21cebd08793c2fda641403a82d034 Mon Sep 17 00:00:00 2001 From: rickwu666666 <98441647+rickwu666666@users.noreply.github.com> Date: Tue, 20 Feb 2024 15:21:26 +0800 Subject: [PATCH 037/108] Add sd card test in strict confinement mode (New) (#972) * Add sd card test in strict confinement mode * Remove redundant template engine * Fix the job fields * Fix the clashes template job id. 
--- .../units/buzzer/jobs.pxu | 4 +- .../units/buzzer/test-plan.pxu | 6 +- .../units/optee/jobs.pxu | 22 +++---- .../mediacard-strict-confinement.pxu | 65 +++++++++++++++++++ .../test-plan-stress-strict-confinement.pxu | 20 ------ .../test-plan-strict-confinement.pxu | 36 ++++++++++ 6 files changed, 118 insertions(+), 35 deletions(-) create mode 100644 contrib/checkbox-provider-ce-oem/units/strict-confinement/mediacard-strict-confinement.pxu delete mode 100644 contrib/checkbox-provider-ce-oem/units/strict-confinement/test-plan-stress-strict-confinement.pxu create mode 100644 contrib/checkbox-provider-ce-oem/units/strict-confinement/test-plan-strict-confinement.pxu diff --git a/contrib/checkbox-provider-ce-oem/units/buzzer/jobs.pxu b/contrib/checkbox-provider-ce-oem/units/buzzer/jobs.pxu index 1f42d2f17a..96569e6d71 100644 --- a/contrib/checkbox-provider-ce-oem/units/buzzer/jobs.pxu +++ b/contrib/checkbox-provider-ce-oem/units/buzzer/jobs.pxu @@ -36,7 +36,7 @@ command: unit: template template-resource: ce-oem-gpio-buzzer-mapping template-unit: job -id: ce-oem-buzzer/sound-test-{name} +id: ce-oem-gpio-buzzer/sound-test-{name} _summary: To test {name} could be make a sound by GPIO _purpose: To test {name} could be make a sound by GPIO. @@ -70,7 +70,7 @@ command: unit: template template-resource: ce-oem-pwm-buzzer-mapping template-unit: job -id: ce-oem-buzzer/sound-test-{name} +id: ce-oem-pwm-buzzer/sound-test-{name} _summary: To test {name} could be make a sound by PWM _purpose: To test {name} could be make a sound by PWM. 
diff --git a/contrib/checkbox-provider-ce-oem/units/buzzer/test-plan.pxu b/contrib/checkbox-provider-ce-oem/units/buzzer/test-plan.pxu index 5363b68e13..bc5ee9e724 100644 --- a/contrib/checkbox-provider-ce-oem/units/buzzer/test-plan.pxu +++ b/contrib/checkbox-provider-ce-oem/units/buzzer/test-plan.pxu @@ -18,7 +18,8 @@ bootstrap_include: ce-oem-pwm-buzzer-mapping include: ce-oem-buzzer/input-pcspkr - ce-oem-buzzer/sound-test-.* + ce-oem-gpio-buzzer/sound-test-.* + ce-oem-pwm-buzzer/sound-test-.* id: ce-oem-buzzer-automated unit: test plan @@ -35,7 +36,8 @@ bootstrap_include: ce-oem-pwm-buzzer-mapping include: after-suspend-ce-oem-buzzer/input-pcspkr - after-suspend-ce-oem-buzzer/sound-test-.* + after-suspend-ce-oem-gpio-buzzer/sound-test-.* + after-suspend-ce-oem-pwm-buzzer/sound-test-.* id: after-suspend-ce-oem-buzzer-automated unit: test plan diff --git a/contrib/checkbox-provider-ce-oem/units/optee/jobs.pxu b/contrib/checkbox-provider-ce-oem/units/optee/jobs.pxu index 34f0ccf21b..b2a2cfbdd0 100644 --- a/contrib/checkbox-provider-ce-oem/units/optee/jobs.pxu +++ b/contrib/checkbox-provider-ce-oem/units/optee/jobs.pxu @@ -6,7 +6,7 @@ user: root estimated_duration: 20.0 requires: manifest.has_optee == 'True' imports: from com.canonical.plainbox import manifest -command: +command: node="$(find /dev -type c -regex '.*/\(tee\|teepriv\)[0-9]')" if [[ -n $node ]]; then echo -e "\nInfo: Find OP-TEE node in the system!" @@ -26,7 +26,7 @@ plugin: shell user: root estimated_duration: 20.0 environ: OPTEE_TOOL -command: +command: if [[ -z "$OPTEE_TOOL" ]]; then gadget=$(snap list | awk '$NF == "gadget" {print $1}') xtest=$(snap info "$gadget"|grep xtest) @@ -75,7 +75,7 @@ command: exit 1 else echo -e '\nInfo: TA installed SUCCESS!' 
- fi + fi fi id: ce-oem-optee-test-list @@ -91,14 +91,14 @@ command: filepath="$PLAINBOX_PROVIDER_DATA/optee-test.json" fi parse_optee_test.py "$filepath" -_summary: Collect the test cases support by OP-TEE test(xtest) +_summary: Collect the test cases support by OP-TEE test(xtest) id: ce-oem-optee-test-list-pkcs11 estimated_duration: 1 plugin: resource user: root environ: OPTEE_CASES -command: +command: filepath="" if [[ -n "$OPTEE_CASES" ]]; then filepath="$OPTEE_CASES" @@ -106,14 +106,14 @@ command: filepath="$PLAINBOX_PROVIDER_DATA/optee-test.json" fi parse_optee_test.py "$filepath" -p -_summary: Collect the test cases related with PKCS11 support by OP-TEE test(xtest) +_summary: Collect the test cases related with PKCS11 support by OP-TEE test(xtest) unit: template template-resource: ce-oem-optee-test-list template-unit: job template-engine: jinja2 id: ce-oem-optee/xtest-{{ suite }}-{{ description }} -_summary: +_summary: OP-TEE test by using xtest to test suite {{ suite }} {{ description }} plugin: shell user: root @@ -136,16 +136,16 @@ unit: template template-resource: ce-oem-optee-test-list-pkcs11 template-unit: job template-engine: jinja2 -id: ce-oem-optee/xtest-{{ suite }}-{{ description }} -_summary: - OP-TEE test by using xtest to test PKCS11 related {{ suite }} {{ description }} +id: ce-oem-optee/xtest-pkcs11-{{ description }} +_summary: + OP-TEE test by using xtest to test PKCS11 related {{ description }} plugin: shell user: root category_id: optee estimated_duration: 30 depends: ce-oem-optee/xtest-check flags: also-after-suspend -command: +command: tool="" if [[ -n "$OPTEE_TOOL" ]]; then tool="$OPTEE_TOOL" diff --git a/contrib/checkbox-provider-ce-oem/units/strict-confinement/mediacard-strict-confinement.pxu b/contrib/checkbox-provider-ce-oem/units/strict-confinement/mediacard-strict-confinement.pxu new file mode 100644 index 0000000000..169f332ee7 --- /dev/null +++ 
b/contrib/checkbox-provider-ce-oem/units/strict-confinement/mediacard-strict-confinement.pxu @@ -0,0 +1,65 @@ +plugin: user-interact +category_id: strict-confinement-mode +id: strict-confine/mediacard/sdhc-insert +flags: also-after-suspend +estimated_duration: 30.0 +command: + test-strict-confinement.run-watcher insertion mediacard +imports: + from com.canonical.plainbox import manifest + from com.canonical.certification import snap + from com.canonical.certification import lsb +requires: + manifest.has_card_reader == 'True' + lsb.distributor_id == 'Ubuntu Core' + snap.name == 'test-strict-confinement' +user: root +_summary: Test that insertion of an SDHC card is detected +_description: + Verify the SDHC card insertion by checking journal log. +_purpose: + This test will check that the systems media card reader can + detect the insertion of a UNLOCKED Secure Digital High-Capacity + (SDHC) media card +_steps: + 1. Commence the test and then insert an UNLOCKED SDHC card into the reader. + (Note: this test will time-out after 20 seconds.) + 2. Do not remove the device after this test. +_verification: + The verification of this test is automated. Do not change the + automatically selected result. + +plugin: shell +category_id: strict-confinement-mode +id: strict-confine/mediacard/sdhc-storage +estimated_duration: 30.0 +depends: strict-confine/mediacard/sdhc-insert +user: root +flags: preserve-cwd reset-locale also-after-suspend +command: test-strict-confinement.usb-read-write +_summary: Test reading & writing to a SDHC Card +_description: + This test is automated and executes after the mediacard/sdhc-insert + test is run. It tests reading and writing to the SDHC card. 
+ +plugin: user-interact +category_id: strict-confinement-mode +id: strict-confine/mediacard/sdhc-remove +flags: also-after-suspend +estimated_duration: 30.0 +depends: strict-confine/mediacard/sdhc-insert +command: + test-strict-confinement.run-watcher removal mediacard +user: root +_summary: Test that removal of an SDHC card is detected +_description: + Verify the SDHC card removal by checking journal log. +_purpose: + This test will check that the system correctly detects + the removal of an SDHC card from the system's card reader. +_steps: + 1. Commence the test and then remove the SDHC card from the reader. + (Note: this test will time-out after 20 seconds.) +_verification: + The verification of this test is automated. Do not change the + automatically selected result. diff --git a/contrib/checkbox-provider-ce-oem/units/strict-confinement/test-plan-stress-strict-confinement.pxu b/contrib/checkbox-provider-ce-oem/units/strict-confinement/test-plan-stress-strict-confinement.pxu deleted file mode 100644 index df61c6788d..0000000000 --- a/contrib/checkbox-provider-ce-oem/units/strict-confinement/test-plan-stress-strict-confinement.pxu +++ /dev/null @@ -1,20 +0,0 @@ -id: dbus-warm-boot -unit: test plan -_name: Warm reboot test via dbus -_description: Warm reboot test by using dbus command -bootstrap_include: - com.canonical.certification::reboot-run-generator -include: - dbus-warm-boot-boot - dbus-warm-boot-test - - -id: dbus-cold-boot -unit: test plan -_name: Cold boot test via dbus -_description: Cold boot test by using dbus command -bootstrap_include: - com.canonical.certification::reboot-run-generator -include: - dbus-cold-boot-boot - dbus-cold-boot-test diff --git a/contrib/checkbox-provider-ce-oem/units/strict-confinement/test-plan-strict-confinement.pxu b/contrib/checkbox-provider-ce-oem/units/strict-confinement/test-plan-strict-confinement.pxu new file mode 100644 index 0000000000..e7a365d10a --- /dev/null +++ 
b/contrib/checkbox-provider-ce-oem/units/strict-confinement/test-plan-strict-confinement.pxu @@ -0,0 +1,36 @@ +id: dbus-warm-boot +unit: test plan +_name: Warm reboot test via dbus +_description: + Warm reboot test by using dbus command. + Rely on test-strict-confinement SNAP to test. +bootstrap_include: + com.canonical.certification::reboot-run-generator +include: + dbus-warm-boot-boot + dbus-warm-boot-test + + +id: dbus-cold-boot +unit: test plan +_name: Cold boot test via dbus +_description: + Cold boot test by using dbus command. + Rely on test-strict-confinement SNAP to test. +bootstrap_include: + com.canonical.certification::reboot-run-generator +include: + dbus-cold-boot-boot + dbus-cold-boot-test + + +id: strict-confine-mediacard +unit: test plan +_name: Test mediacard in strict confinement mode. +_description: + Test mediacard in strict confinement mode. + Rely on test-strict-confinement SNAP to test. +include: + strict-confine/mediacard/sdhc-insert + strict-confine/mediacard/sdhc-storage + strict-confine/mediacard/sdhc-remove From dc1adfa2150abe02ffded82d4ac2d114f77c1640 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Tue, 20 Feb 2024 13:08:50 +0100 Subject: [PATCH 038/108] Resume screen for checkbox remote (New) (#839) * add an urwid screen for resuming a session * properly store resume outcome on cert-blockers * mock the maybe resume auto function too Signed-off-by: Maciej Kisielewski * Tests for _resume_session * Tests for utils functions * Test finalize_session * Fixed bug in resume session for failing cert blockers * remove another unused import * add an urwid screen for resuming a session * fix corner cases that yielded crashes properly store resume outcome on cert-blockers update the cert-blocker metabox resume scenario mock the maybe resume auto function too Signed-off-by: Maciej Kisielewski Excluded UI files from unit tests coverage Test restart and start_new_session or resume Test the Launcher resume behaviour Tests for _resume_session Fixed 
invoke_resume test Fixed cert blocker requiring comments Tests for utils functions Removed black quirk re-inlined strings Test finalize_session Fixed bug in resume session for failing cert blockers * Draft implementation Minor: move shared functionality to utils * Make self.get_resumable_sessions callable from more states * Make overwrite_result_dict param to resume_by_id Minor: Small refactor to remove os exists Minor: removed unused function * Remove moved code restored by rebase and updated tests * Restored decode of utf8 byte arrays * Make this scenario remote+local * Full working version of resume Minor: change variable name to be comprehensible * Made interact job certblocker * Actually always run jobs after resume or start * Reset session state to idle in the handler manually * Test new functionality of assistant.py Minor: tests for proxy functions Minor: call proxy function in controller.py Minor: add documentation to notable function * Unmove line of UsageExpectation * Fix result from file test Minor: removed unused variable * Test resume session menu Minor: small refactoring making names clearer * Init tests for new resume functionality of controller * Compatibility with pythonsaurus --------- Signed-off-by: Maciej Kisielewski Co-authored-by: Maciej Kisielewski --- .../checkbox_ng/launcher/controller.py | 145 ++++++- .../checkbox_ng/launcher/subcommands.py | 62 +-- .../checkbox_ng/launcher/test_controller.py | 409 ++++++++++++++++++ .../checkbox_ng/launcher/test_subcommands.py | 16 +- checkbox-ng/checkbox_ng/utils.py | 56 +++ .../plainbox/impl/session/assistant.py | 16 +- .../plainbox/impl/session/remote_assistant.py | 51 ++- .../plainbox/impl/session/test_assistant.py | 29 ++ .../impl/session/test_remote_assistant.py | 78 +++- .../units/cert-blocker-tps.pxu | 4 +- 10 files changed, 757 insertions(+), 109 deletions(-) diff --git a/checkbox-ng/checkbox_ng/launcher/controller.py b/checkbox-ng/checkbox_ng/launcher/controller.py index 5df25e5e2e..f5b5ea9c77 
100644 --- a/checkbox-ng/checkbox_ng/launcher/controller.py +++ b/checkbox-ng/checkbox_ng/launcher/controller.py @@ -38,16 +38,26 @@ from tempfile import SpooledTemporaryFile from plainbox.abc import IJobResult +from plainbox.impl.result import MemoryJobResult from plainbox.impl.color import Colorizer from plainbox.impl.config import Configuration from plainbox.impl.session.remote_assistant import RemoteSessionAssistant from plainbox.vendor import rpyc -from checkbox_ng.urwid_ui import TestPlanBrowser -from checkbox_ng.urwid_ui import CategoryBrowser -from checkbox_ng.urwid_ui import ManifestBrowser -from checkbox_ng.urwid_ui import ReRunBrowser -from checkbox_ng.urwid_ui import interrupt_dialog -from checkbox_ng.urwid_ui import resume_dialog +from checkbox_ng.resume_menu import ResumeMenu +from checkbox_ng.urwid_ui import ( + TestPlanBrowser, + CategoryBrowser, + ManifestBrowser, + ReRunBrowser, + interrupt_dialog, + resume_dialog, + ResumeInstead, +) +from checkbox_ng.utils import ( + generate_resume_candidate_description, + newline_join, + request_comment, +) from checkbox_ng.launcher.run import NormalUI, ReRunJob from checkbox_ng.launcher.stages import MainLoopStage from checkbox_ng.launcher.stages import ReportsStage @@ -294,7 +304,7 @@ def quitter(msg): "running": self.wait_and_continue, "finalizing": self.finish_session, "testsselected": partial( - self.run_jobs, resumed_session_info=payload + self.run_jobs, resumed_ongoing_session_info=payload ), "bootstrapping": self.restart, "bootstrapped": partial( @@ -349,7 +359,6 @@ def resume_or_start_new_session(self): configuration = dict() configuration["launcher"] = self._launcher_text configuration["normal_user"] = self._normal_user - try: _logger.info("remote: Starting new session.") tps = self.sa.start_session(configuration) @@ -371,8 +380,9 @@ def resume_or_start_new_session(self): else: self.interactively_choose_tp(tps) - def interactively_choose_tp(self, tps): - _logger.info("controller: Interactively 
choosing TP.") + self.run_jobs() + + def _new_session_flow(self, tps, resumable_sessions): tp_info_list = [{"id": tp[0], "name": tp[1]} for tp in tps] if not tp_info_list: _logger.error(_("There were no test plans to select from!")) @@ -381,11 +391,11 @@ def interactively_choose_tp(self, tps): _("Select test plan"), tp_info_list, self.launcher.get_value("test plan", "unit"), + len(resumable_sessions), ).run() if selected_tp is None: print(_("Nothing selected")) raise SystemExit(0) - self.select_tp(selected_tp) if not self.jobs: _logger.error(self.C.RED(_("There were no tests to select from!"))) @@ -393,6 +403,108 @@ def interactively_choose_tp(self, tps): return self.select_jobs(self.jobs) + def _resume_session_menu(self, resumable_sessions): + """ + Run the interactive resume menu. + Returns True if a session was resumed, False otherwise. + """ + entries = [ + ( + candidate.id, + generate_resume_candidate_description(candidate), + ) + for candidate in resumable_sessions + ] + while True: + # let's loop until someone selects something else than "delete" + # in other words, after each delete action let's go back to the + # resume menu + + resume_params = ResumeMenu(entries).run() + if resume_params.action == "delete": + self.sa.delete_sessions([resume_params.session_id]) + self.resume_candidates = list(self.sa.get_resumable_sessions()) + + # the entries list is just a copy of the resume_candidates, + # and it's not updated when we delete a session, so we need + # to update it manually + entries = [ + en for en in entries if en[0] != resume_params.session_id + ] + + if not entries: + # if everything got deleted let's go back to the test plan + # selection menu + return False + else: + break + + if resume_params.session_id: + self._resume_session(resume_params) + return True + return False + + def _resume_session(self, resume_params): + metadata = self.sa._sa.resume_session(resume_params.session_id) + if "testplanless" not in metadata.flags: + app_blob = 
json.loads(metadata.app_blob.decode("UTF-8")) + test_plan_id = app_blob["testplan_id"] + self.sa._sa.select_test_plan(test_plan_id) + self.sa._sa.bootstrap() + last_job = metadata.running_job_name + is_cert_blocker = ( + self.sa._sa.get_job_state(last_job).effective_certification_status + == "blocker" + ) + # If we resumed maybe not rerun the same, probably broken job + result_dict = { + "comments": resume_params.comments, + } + if resume_params.action == "pass": + result_dict["comments"] = newline_join( + result_dict["comments"], "Passed after resuming execution" + ) + + result_dict["outcome"] = IJobResult.OUTCOME_PASS + elif resume_params.action == "fail": + if is_cert_blocker and not resume_params.comments: + # cert blockers must be commented when failing + result_dict["comments"] = request_comment("why it failed.") + else: + result_dict["comments"] = newline_join( + result_dict["comments"], "Failed after resuming execution" + ) + + result_dict["outcome"] = IJobResult.OUTCOME_FAIL + elif resume_params.action == "skip": + if is_cert_blocker and not resume_params.comments: + # cert blockers must be commented when skipped + result_dict["comments"] = request_comment( + "why you want to skip it." 
+ ) + else: + result_dict["comments"] = newline_join( + result_dict["comments"], "Skipped after resuming execution" + ) + result_dict["outcome"] = IJobResult.OUTCOME_SKIP + elif resume_params.action == "rerun": + # if the job outcome is set to none it will be rerun + result_dict["outcome"] = None + self.sa.resume_by_id(resume_params.session_id, result_dict) + + def interactively_choose_tp(self, tps): + _logger.info("controller: Interactively choosing TP.") + something_got_chosen = False + while not something_got_chosen: + resumable_sessions = list(self.sa.get_resumable_sessions()) + try: + self._new_session_flow(tps, resumable_sessions) + something_got_chosen = True + except ResumeInstead: + something_got_chosen = self._resume_session_menu( + resumable_sessions + ) + def select_tp(self, tp): _logger.info("controller: Selected test plan: %s", tp) try: @@ -462,7 +574,6 @@ def select_jobs(self, all_jobs): self.sa.modify_todo_list(chosen_jobs) self._save_manifest(interactive=True) self.sa.finish_job_selection() - self.run_jobs() def register_arguments(self, parser): parser.add_argument("host", help=_("target host")) @@ -551,9 +662,12 @@ def _handle_last_job_after_resume(self, resumed_session_info): + SimpleUI.C.result(self.sa.get_job_result(job["id"])) ) - def run_jobs(self, resumed_session_info=None): - if resumed_session_info and resumed_session_info["last_job"]: - self._handle_last_job_after_resume(resumed_session_info) + def run_jobs(self, resumed_ongoing_session_info=None): + if ( + resumed_ongoing_session_info + and resumed_ongoing_session_info["last_job"] + ): + self._handle_last_job_after_resume(resumed_ongoing_session_info) _logger.info("controller: Running jobs.") jobs = self.sa.get_session_progress() _logger.debug( @@ -698,6 +812,7 @@ def _maybe_manual_rerun_jobs(self): def _run_jobs(self, jobs_repr, total_num=0): for job in jobs_repr: job_state = self.sa._sa.get_job_state(job["id"]) + self.sa.note_metadata_starting_job(job, job_state) SimpleUI.header( 
_("Running job {} / {}").format( job["num"], total_num, fill="-" diff --git a/checkbox-ng/checkbox_ng/launcher/subcommands.py b/checkbox-ng/checkbox_ng/launcher/subcommands.py index d3d3475dec..1dd4955614 100644 --- a/checkbox-ng/checkbox_ng/launcher/subcommands.py +++ b/checkbox-ng/checkbox_ng/launcher/subcommands.py @@ -21,9 +21,9 @@ from argparse import ArgumentTypeError from argparse import SUPPRESS from collections import defaultdict -from datetime import datetime from string import Formatter from tempfile import TemporaryDirectory +import textwrap import fnmatch import gettext import json @@ -34,7 +34,6 @@ import shlex import sys import tarfile -import textwrap import time from plainbox.abc import IJobResult @@ -66,7 +65,11 @@ from checkbox_ng.urwid_ui import ReRunBrowser from checkbox_ng.urwid_ui import ResumeInstead from checkbox_ng.urwid_ui import TestPlanBrowser -from checkbox_ng.utils import newline_join +from checkbox_ng.utils import ( + newline_join, + generate_resume_candidate_description, + request_comment, +) _ = gettext.gettext @@ -370,7 +373,7 @@ def _manually_resume_session(self, resume_candidates): entries = [ ( candidate.id, - _generate_resume_candidate_description(candidate), + generate_resume_candidate_description(candidate), ) for candidate in resume_candidates ] @@ -1358,54 +1361,3 @@ def _print_obj(self, obj): # provider and service does not have origin for k, v in obj.attrs.items(): print("{}: {}".format(k, v)) - - -def _generate_resume_candidate_description(candidate): - template = textwrap.dedent( - """ - Session Title: - {session_title} - - Test plan used: - {tp_id} - - Last job that was run: - {last_job_id} - - Last job was started at: - {last_job_start_time} - """ - ) - app_blob = json.loads(candidate.metadata.app_blob.decode("UTF-8")) - session_title = candidate.metadata.title or "Unknown" - tp_id = app_blob.get("testplan_id", "Unknown") - last_job_id = candidate.metadata.running_job_name or "Unknown" - last_job_timestamp = 
candidate.metadata.last_job_start_time or None - if last_job_timestamp: - dt = datetime.utcfromtimestamp(last_job_timestamp) - last_job_start_time = dt.strftime("%Y-%m-%d %H:%M:%S") - else: - last_job_start_time = "Unknown" - return template.format( - session_title=session_title, - tp_id=tp_id, - last_job_id=last_job_id, - last_job_start_time=last_job_start_time, - ) - - -def request_comment(prompt: str) -> str: - """ - Request a comment from the user. - :param prompt: the thing that user has to explain with their comment - :return: the comment provided by the user - """ - colorizer = Colorizer() - red = colorizer.RED - blue = colorizer.BLUE - comment = "" - while not comment: - print(red("This job is required in order to issue a certificate.")) - print(red("Please add a comment to explain {}.".format(prompt))) - comment = input(blue("Please enter your comments:\n")) - return comment diff --git a/checkbox-ng/checkbox_ng/launcher/test_controller.py b/checkbox-ng/checkbox_ng/launcher/test_controller.py index 5cafe9f747..45c49f1515 100644 --- a/checkbox-ng/checkbox_ng/launcher/test_controller.py +++ b/checkbox-ng/checkbox_ng/launcher/test_controller.py @@ -21,6 +21,7 @@ from unittest import TestCase, mock +from checkbox_ng.urwid_ui import ResumeInstead from checkbox_ng.launcher.controller import RemoteController from checkbox_ng.launcher.controller import is_hostname_a_loopback @@ -397,6 +398,414 @@ def test__run_jobs_steps_quit(self, simple_ui_mock): with self.assertRaises(SystemExit): RemoteController._run_jobs(self_mock, [jobs_repr_mock]) + @mock.patch( + "checkbox_ng.launcher.controller.generate_resume_candidate_description", + new=mock.MagicMock(), + ) + @mock.patch("checkbox_ng.launcher.controller.ResumeMenu") + def test_delete_session(self, mock_menu): + self_mock = mock.MagicMock() + resumable_sessions = [ + mock.MagicMock(id=1, name="Session 1"), + mock.MagicMock(id=2, name="Session 2"), + ] + menu_actions_buffer = [ + mock.MagicMock( + action="delete", 
session_id=1 + ), # First call simulates deletion + mock.MagicMock( + action="resume", session_id=2 + ), # Second call simulates resuming a session + ] + # Setup the mock to simulate delete action + mock_menu.return_value.run.side_effect = menu_actions_buffer + + self_mock.sa.get_resumable_sessions.return_value = resumable_sessions[ + 1: + ] + + resumed = RemoteController._resume_session_menu( + self_mock, resumable_sessions + ) + + # Check if the session was resumed correctly after deletion + self.assertTrue(resumed) + self_mock._resume_session.assert_called_once_with( + menu_actions_buffer[1] + ) + self_mock.sa.delete_sessions.assert_called_once_with([1]) + + @mock.patch( + "checkbox_ng.launcher.controller.generate_resume_candidate_description", + new=mock.MagicMock(), + ) + @mock.patch("checkbox_ng.launcher.controller.ResumeMenu") + def test_no_session_resumed(self, mock_menu): + self_mock = mock.MagicMock() + resumable_sessions = [ + mock.MagicMock(id=1, name="Session 1"), + mock.MagicMock(id=2, name="Session 2"), + ] + menu_actions_buffer = [ + mock.MagicMock( + action="delete", session_id=1 + ), # First call simulates deletion + mock.MagicMock( + action="resume", session_id=2 + ), # Second call simulates resuming a session + ] + # Setup the mock to simulate delete action + mock_menu.return_value.run.side_effect = menu_actions_buffer + + self_mock.sa.get_resumable_sessions.return_value = [] + + resumed = RemoteController._resume_session_menu( + self_mock, [resumable_sessions[0]] + ) + + # Check that the method returns False when all sessions are deleted + self.assertFalse(resumed) + + @mock.patch( + "checkbox_ng.launcher.controller.generate_resume_candidate_description", + new=mock.MagicMock(), + ) + @mock.patch("checkbox_ng.launcher.controller.ResumeMenu") + def test_session_resumed_no_id(self, mock_menu): + self_mock = mock.MagicMock() + resumable_sessions = [ + mock.MagicMock(id=1, name="Session 1"), + mock.MagicMock(id=2, name="Session 2"), + ] + # Setup 
the mock to simulate selecting a session to resume + mock_menu.return_value.run.return_value = mock.MagicMock( + action="resume", session_id=None + ) + + resumed = RemoteController._resume_session_menu( + self_mock, resumable_sessions + ) + + # Check that the method returns True when a session is resumed + self.assertFalse(resumed) + self.assertFalse(self_mock._resume_session.called) + + @mock.patch( + "checkbox_ng.launcher.controller.generate_resume_candidate_description", + new=mock.MagicMock(), + ) + @mock.patch("checkbox_ng.launcher.controller.ResumeMenu") + def test_session_resumed(self, mock_menu): + self_mock = mock.MagicMock() + resumable_sessions = [ + mock.MagicMock(id=1, name="Session 1"), + mock.MagicMock(id=2, name="Session 2"), + ] + # Setup the mock to simulate selecting a session to resume + mock_menu.return_value.run.return_value = mock.MagicMock( + action="resume", session_id=2 + ) + + resumed = RemoteController._resume_session_menu( + self_mock, resumable_sessions + ) + + # Check that the method returns True when a session is resumed + self.assertTrue(resumed) + self.assertTrue(self_mock._resume_session.called) + + @mock.patch("json.loads") + @mock.patch("builtins.open") + @mock.patch("checkbox_ng.launcher.controller.IJobResult") + @mock.patch( + "checkbox_ng.launcher.controller.request_comment", + new=mock.MagicMock(), + ) + def test_resume_session_pass( + self, + mock_IJobResult, + mock_open, + mock_loads, + ): + mock_loads.return_value = {"testplan_id": "abc"} + + sa_mock = mock.MagicMock() + resume_params = mock.MagicMock( + action="pass", session_id="123", comments="Initial comment" + ) + metadata_mock = mock.MagicMock( + app_blob=b'{"testplan_id": "abc"}', + flags=[], + running_job_name="job1", + ) + sa_mock._sa.resume_session.return_value = metadata_mock + sa_mock._sa.get_job_state.return_value = mock.MagicMock( + effective_certification_status="non-blocker" + ) + + self_mock = mock.MagicMock(sa=sa_mock) + + 
RemoteController._resume_session(self_mock, resume_params) + + # Assertions + sa_mock._sa.resume_session.assert_called_once_with("123") + sa_mock._sa.select_test_plan.assert_called_once_with("abc") + self.assertTrue(sa_mock._sa.bootstrap.called) + sa_mock.resume_by_id.assert_called_once_with( + "123", + { + "comments": "Initial comment\nPassed after resuming execution", + "outcome": mock_IJobResult.OUTCOME_PASS, + }, + ) + + @mock.patch("json.loads") + @mock.patch("builtins.open") + @mock.patch("checkbox_ng.launcher.controller.IJobResult") + @mock.patch( + "checkbox_ng.launcher.controller.request_comment", + new=mock.MagicMock(return_value="comment requested from user"), + ) + def test_resume_session_fail_not_cert_blocker( + self, + mock_IJobResult, + mock_open, + mock_loads, + ): + mock_loads.return_value = {"testplan_id": "abc"} + + sa_mock = mock.MagicMock() + resume_params = mock.MagicMock( + action="fail", session_id="123", comments="Initial comment" + ) + metadata_mock = mock.MagicMock( + app_blob=b'{"testplan_id": "abc"}', + flags=[], + running_job_name="job1", + ) + sa_mock._sa.resume_session.return_value = metadata_mock + sa_mock._sa.get_job_state.return_value = mock.MagicMock( + effective_certification_status="non-blocker" + ) + + self_mock = mock.MagicMock(sa=sa_mock) + + RemoteController._resume_session(self_mock, resume_params) + + # Assertions + sa_mock._sa.resume_session.assert_called_once_with("123") + sa_mock._sa.select_test_plan.assert_called_once_with("abc") + self.assertTrue(sa_mock._sa.bootstrap.called) + sa_mock.resume_by_id.assert_called_once_with( + "123", + { + "comments": "Initial comment\nFailed after resuming execution", + "outcome": mock_IJobResult.OUTCOME_FAIL, + }, + ) + + @mock.patch("json.loads") + @mock.patch("builtins.open") + @mock.patch("checkbox_ng.launcher.controller.IJobResult") + @mock.patch( + "checkbox_ng.launcher.controller.request_comment", + new=mock.MagicMock(return_value="comment requested from user"), + ) + def 
test_resume_session_fail_cert_blocker( + self, + mock_IJobResult, + mock_open, + mock_loads, + ): + mock_loads.return_value = {"testplan_id": "abc"} + + sa_mock = mock.MagicMock() + resume_params = mock.MagicMock( + action="fail", session_id="123", comments=None + ) + metadata_mock = mock.MagicMock( + app_blob=b'{"testplan_id": "abc"}', + flags=["testplanless"], + running_job_name="job1", + ) + sa_mock._sa.resume_session.return_value = metadata_mock + sa_mock._sa.get_job_state.return_value = mock.MagicMock( + effective_certification_status="blocker" + ) + + self_mock = mock.MagicMock(sa=sa_mock) + + RemoteController._resume_session(self_mock, resume_params) + + # Assertions + sa_mock._sa.resume_session.assert_called_once_with("123") + sa_mock.resume_by_id.assert_called_once_with( + "123", + { + "comments": "comment requested from user", + "outcome": mock_IJobResult.OUTCOME_FAIL, + }, + ) + + @mock.patch("json.loads") + @mock.patch("builtins.open") + @mock.patch("checkbox_ng.launcher.controller.IJobResult") + @mock.patch( + "checkbox_ng.launcher.controller.request_comment", + new=mock.MagicMock(return_value="comment requested from user"), + ) + def test_resume_session_skip_not_cert_blocker( + self, + mock_IJobResult, + mock_open, + mock_loads, + ): + mock_loads.return_value = {"testplan_id": "abc"} + + sa_mock = mock.MagicMock() + resume_params = mock.MagicMock( + action="skip", session_id="123", comments="Initial comment" + ) + metadata_mock = mock.MagicMock( + app_blob=b'{"testplan_id": "abc"}', + flags=[], + running_job_name="job1", + ) + sa_mock._sa.resume_session.return_value = metadata_mock + sa_mock._sa.get_job_state.return_value = mock.MagicMock( + effective_certification_status="non-blocker" + ) + + self_mock = mock.MagicMock(sa=sa_mock) + + RemoteController._resume_session(self_mock, resume_params) + + # Assertions + sa_mock._sa.resume_session.assert_called_once_with("123") + sa_mock._sa.select_test_plan.assert_called_once_with("abc") + 
self.assertTrue(sa_mock._sa.bootstrap.called) + sa_mock.resume_by_id.assert_called_once_with( + "123", + { + "comments": "Initial comment\nSkipped after resuming execution", + "outcome": mock_IJobResult.OUTCOME_SKIP, + }, + ) + + @mock.patch("json.loads") + @mock.patch("builtins.open") + @mock.patch("checkbox_ng.launcher.controller.IJobResult") + @mock.patch( + "checkbox_ng.launcher.controller.request_comment", + new=mock.MagicMock(return_value="comment requested from user"), + ) + def test_resume_session_skip_cert_blocker( + self, + mock_IJobResult, + mock_open, + mock_loads, + ): + mock_loads.return_value = {"testplan_id": "abc"} + + sa_mock = mock.MagicMock() + resume_params = mock.MagicMock( + action="skip", session_id="123", comments=None + ) + metadata_mock = mock.MagicMock( + app_blob=b'{"testplan_id": "abc"}', + flags=["testplanless"], + running_job_name="job1", + ) + sa_mock._sa.resume_session.return_value = metadata_mock + sa_mock._sa.get_job_state.return_value = mock.MagicMock( + effective_certification_status="blocker" + ) + + self_mock = mock.MagicMock(sa=sa_mock) + + RemoteController._resume_session(self_mock, resume_params) + + # Assertions + sa_mock._sa.resume_session.assert_called_once_with("123") + sa_mock.resume_by_id.assert_called_once_with( + "123", + { + "comments": "comment requested from user", + "outcome": mock_IJobResult.OUTCOME_SKIP, + }, + ) + + @mock.patch("json.loads") + @mock.patch("builtins.open") + @mock.patch("checkbox_ng.launcher.controller.IJobResult") + @mock.patch( + "checkbox_ng.launcher.controller.request_comment", + new=mock.MagicMock(return_value="comment requested from user"), + ) + def test_resume_session_rerun( + self, + mock_IJobResult, + mock_open, + mock_loads, + ): + mock_loads.return_value = {"testplan_id": "abc"} + + sa_mock = mock.MagicMock() + resume_params = mock.MagicMock( + action="rerun", session_id="123", comments=None + ) + metadata_mock = mock.MagicMock( + app_blob=b'{"testplan_id": "abc"}', + 
flags=["testplanless"], + running_job_name="job1", + ) + sa_mock._sa.resume_session.return_value = metadata_mock + sa_mock._sa.get_job_state.return_value = mock.MagicMock( + effective_certification_status="blocker" + ) + + self_mock = mock.MagicMock(sa=sa_mock) + + RemoteController._resume_session(self_mock, resume_params) + + # Assertions + sa_mock._sa.resume_session.assert_called_once_with("123") + sa_mock.resume_by_id.assert_called_once_with( + "123", + { + "comments": None, + "outcome": None, + }, + ) + + def test_interactively_choose_tp(self): + self_mock = mock.MagicMock() + + # by default always try to start a new session and not resuming + RemoteController.interactively_choose_tp(self_mock, []) + + self.assertTrue(self_mock._new_session_flow.called) + self.assertFalse(self_mock._resume_session_menu.called) + + def test_interactively_choose_tp_resume(self): + self_mock = mock.MagicMock() + self_mock._new_session_flow.side_effect = ResumeInstead + self_mock._resume_session_menu.return_value = True + + RemoteController.interactively_choose_tp(self_mock, []) + + self.assertTrue(self_mock._new_session_flow.called) + self.assertTrue(self_mock._resume_session_menu.called) + + def test_interactively_choose_tp_resume_retry_tp(self): + self_mock = mock.MagicMock() + self_mock._new_session_flow.side_effect = [ResumeInstead, True] + self_mock._resume_session_menu.return_value = True + + RemoteController.interactively_choose_tp(self_mock, []) + + self.assertTrue(self_mock._new_session_flow.called) + self.assertTrue(self_mock._resume_session_menu.called) class IsHostnameALoopbackTests(TestCase): @mock.patch("socket.gethostbyname") diff --git a/checkbox-ng/checkbox_ng/launcher/test_subcommands.py b/checkbox-ng/checkbox_ng/launcher/test_subcommands.py index 93c425053b..44a9088d9c 100644 --- a/checkbox-ng/checkbox_ng/launcher/test_subcommands.py +++ b/checkbox-ng/checkbox_ng/launcher/test_subcommands.py @@ -22,7 +22,6 @@ from unittest.mock import patch, Mock, MagicMock 
-from unittest.mock import patch, Mock, MagicMock, call from io import StringIO from checkbox_ng.launcher.subcommands import ( Launcher, @@ -30,9 +29,8 @@ ResumeInstead, IJobResult, request_comment, - _generate_resume_candidate_description, + generate_resume_candidate_description, ) -from unittest.mock import patch, Mock, MagicMock class TestLauncher(TestCase): @@ -114,6 +112,7 @@ def test__manually_resume_session_empty_id(self, resume_menu_mock): self.assertFalse(Launcher._manually_resume_session(self_mock, [])) + @patch("checkbox_ng.launcher.subcommands.MemoryJobResult") @patch("checkbox_ng.launcher.subcommands.newline_join", new=MagicMock()) def test__resume_session_pass(self, memory_job_result_mock): @@ -398,7 +397,6 @@ def test_invoke_print_output_customized_format(self, stdout): self.launcher.invoked(self.ctx) self.assertEqual(stdout.getvalue(), expected_out) - class TestUtilsFunctions(TestCase): @patch("checkbox_ng.launcher.subcommands.Colorizer", new=MagicMock()) @patch("builtins.print") @@ -410,21 +408,21 @@ def test_request_comment(self, input_mock, print_mock): self.assertEqual(comment, "failure") - def test__generate_resume_candidate_description_default_time(self): + def test_generate_resume_candidate_description_default_time(self): candidate_mock = MagicMock() candidate_mock.metadata.app_blob = b'{ "testplan_id" : "123" }' candidate_mock.metadata.title = "Title" candidate_mock.metadata.last_job_start_time = None candidate_mock.metadata.running_job_name = "Test" - description = _generate_resume_candidate_description(candidate_mock) + description = generate_resume_candidate_description(candidate_mock) self.assertIn("Unknown", description) self.assertIn("123", description) self.assertIn("Title", description) self.assertIn("Test", description) - def test__generate_resume_candidate_description(self): + def test_generate_resume_candidate_description(self): candidate_mock = MagicMock() candidate_mock.metadata.app_blob = b'{ "testplan_id" : "123" }' 
candidate_mock.metadata.title = "Title" @@ -434,9 +432,9 @@ def test__generate_resume_candidate_description(self): candidate_mock.metadata.last_job_start_time = date.timestamp() candidate_mock.metadata.running_job_name = "Test" - description = _generate_resume_candidate_description(candidate_mock) + description = generate_resume_candidate_description(candidate_mock) self.assertIn("2023", description) self.assertIn("123", description) self.assertIn("Title", description) - self.assertIn("Test", description) \ No newline at end of file + self.assertIn("Test", description) diff --git a/checkbox-ng/checkbox_ng/utils.py b/checkbox-ng/checkbox_ng/utils.py index d81fe355c1..90e8ab9dd4 100644 --- a/checkbox-ng/checkbox_ng/utils.py +++ b/checkbox-ng/checkbox_ng/utils.py @@ -19,6 +19,11 @@ """ Generic utility functions. """ +import json +import textwrap +from datetime import datetime + +from plainbox.impl.color import Colorizer def newline_join(head: str, *tail: str) -> str: @@ -31,3 +36,54 @@ def newline_join(head: str, *tail: str) -> str: if not head: return "\n".join(tail) return "\n".join((head, *tail)) + + +def generate_resume_candidate_description(candidate): + template = textwrap.dedent( + """ + Session Title: + {session_title} + + Test plan used: + {tp_id} + + Last job that was run: + {last_job_id} + + Last job was started at: + {last_job_start_time} + """ + ) + app_blob = json.loads(candidate.metadata.app_blob.decode("UTF-8")) + session_title = candidate.metadata.title or "Unknown" + tp_id = app_blob.get("testplan_id", "Unknown") + last_job_id = candidate.metadata.running_job_name or "Unknown" + last_job_timestamp = candidate.metadata.last_job_start_time or None + if last_job_timestamp: + dt = datetime.utcfromtimestamp(last_job_timestamp) + last_job_start_time = dt.strftime("%Y-%m-%d %H:%M:%S") + else: + last_job_start_time = "Unknown" + return template.format( + session_title=session_title, + tp_id=tp_id, + last_job_id=last_job_id, + 
last_job_start_time=last_job_start_time, + ) + + +def request_comment(prompt: str) -> str: + """ + Request a comment from the user. + :param prompt: the thing that user has to explain with their comment + :return: the comment provided by the user + """ + colorizer = Colorizer() + red = colorizer.RED + blue = colorizer.BLUE + comment = "" + while not comment: + print(red("This job is required in order to issue a certificate.")) + print(red("Please add a comment to explain {}.".format(prompt))) + comment = input(blue("Please enter your comments:\n")) + return comment diff --git a/checkbox-ng/plainbox/impl/session/assistant.py b/checkbox-ng/plainbox/impl/session/assistant.py index 10361bae0e..75e0c30932 100644 --- a/checkbox-ng/plainbox/impl/session/assistant.py +++ b/checkbox-ng/plainbox/impl/session/assistant.py @@ -444,7 +444,6 @@ def get_old_sessions( str(exc), ) - @raises(UnexpectedMethodCall) def delete_sessions(self, session_ids: "List[str]") -> None: """ Delete session storages. @@ -460,7 +459,6 @@ def delete_sessions(self, session_ids: "List[str]") -> None: If the session is not found in the currently selected session repository, it is silently ignored. 
""" - UsageExpectation.of(self).enforce() for storage in WellKnownDirsHelper.get_storage_list(): if storage.id in session_ids: storage.remove() @@ -516,6 +514,7 @@ def start_new_session( self.select_test_plan: "select the test plan to execute", self.get_session_id: "to get the id of currently running session", self.hand_pick_jobs: "select jobs to run (w/o a test plan)", + self.get_resumable_sessions: "get resume candidates", self.finalize_session: "to finalize session", self.configure_application_restart: ( "configure automatic restart capability" @@ -1355,6 +1354,18 @@ def save_manifest(self, manifest_answers): with open(manifest, "wt", encoding="UTF-8") as stream: json.dump(manifest_cache, stream, sort_keys=True, indent=2) + def note_metadata_starting_job(self, job, job_state): + """ + Update the session metadata to make a resumable checkpoint. + + Without the information that this function stores, a session will not + be resumable. This also creates a checkpoint so that the information is + both in the session and on disk. 
+ """ + self._metadata.running_job_name = job["id"] + self._metadata.last_job_start_time = time.time() + self._manager.checkpoint() + @raises(ValueError, TypeError, UnexpectedMethodCall) def run_job( self, job_id: str, ui: "Union[str, IJobRunnerUI]", native: bool @@ -1909,6 +1920,7 @@ def _get_allowed_calls_in_normal_state(self) -> dict: self.get_manifest_repr: ("to get participating manifest units"), self.run_job: "to run a given job", self.use_alternate_selection: "to change the selection", + self.get_resumable_sessions: "get resume candidates", self.hand_pick_jobs: "to generate new selection and use it", self.use_job_result: "to feed job result back to the session", # XXX: should this be available right off the bat or should we wait diff --git a/checkbox-ng/plainbox/impl/session/remote_assistant.py b/checkbox-ng/plainbox/impl/session/remote_assistant.py index 1785f67dda..c07ff8cab2 100644 --- a/checkbox-ng/plainbox/impl/session/remote_assistant.py +++ b/checkbox-ng/plainbox/impl/session/remote_assistant.py @@ -175,6 +175,9 @@ def _reset_sa(self): self.session_change_lock.acquire(blocking=False) self.session_change_lock.release() + def note_metadata_starting_job(self, job, job_state): + self._sa.note_metadata_starting_job(job, job_state) + @property def session_change_lock(self): return self._session_change_lock @@ -214,9 +217,7 @@ def remember_users_response(self, response): self._state = TestsSelected return elif response == "quit": - self._last_response = response - self._state = Idle - self.finalize_session() + self.abandon_session() return self._last_response = response self._state = Running @@ -394,7 +395,7 @@ def finish_job_selection(self): self._jobs_count = len(self._sa.get_dynamic_todo_list()) self._state = TestsSelected - @allowed_when(Interacting) + @allowed_when(Interacting, TestsSelected) def rerun_job(self, job_id, result): self._sa.use_job_result(job_id, result) self.session_change_lock.acquire(blocking=False) @@ -681,7 +682,13 @@ def 
get_jobs_repr(self, job_ids, offset=0): test_info_list = test_info_list + ((test_info,)) return json.dumps(test_info_list) - def resume_by_id(self, session_id=None): + def delete_sessions(self, session_list): + return self._sa.delete_sessions(session_list) + + def get_resumable_sessions(self): + return self._sa.get_resumable_sessions() + + def resume_by_id(self, session_id=None, overwrite_result_dict={}): _logger.info("resume_by_id: %r", session_id) self._launcher = load_configs() resume_candidates = list(self._sa.get_resumable_sessions()) @@ -701,7 +708,6 @@ def resume_by_id(self, session_id=None): } meta = self._sa.resume_session(session_id, runner_kwargs=runner_kwargs) app_blob = json.loads(meta.app_blob.decode("UTF-8")) - launcher = app_blob["launcher"] launcher_from_controller = Configuration.from_text( app_blob["launcher"], "Remote launcher" ) @@ -730,22 +736,19 @@ def resume_by_id(self, session_id=None): self._sa._manager.storage.id ) result_path = os.path.join(session_share, "__result") - if os.path.exists(result_path): - try: - with open(result_path, "rt") as f: - result_dict = json.load(f) - # the only really important field in the result is - # 'outcome' so let's make sure it doesn't contain - # anything stupid - if result_dict.get("outcome") not in [ - "pass", - "fail", - "skip", - ]: - result_dict["outcome"] = IJobResult.OUTCOME_PASS - except json.JSONDecodeError: - pass - else: + try: + with open(result_path, "rt") as f: + result_dict = json.load(f) + # the only really important field in the result is + # 'outcome' so let's make sure it doesn't contain + # anything stupid + if result_dict.get("outcome") not in [ + "pass", + "fail", + "skip", + ]: + result_dict["outcome"] = IJobResult.OUTCOME_PASS + except (json.JSONDecodeError, FileNotFoundError): the_job = self._sa.get_job(self._last_job) if the_job.plugin == "shell": if "noreturn" in the_job.get_flag_set(): @@ -753,6 +756,7 @@ def resume_by_id(self, session_id=None): else: result_dict["outcome"] = 
IJobResult.OUTCOME_CRASH + result_dict.update(overwrite_result_dict) result = MemoryJobResult(result_dict) if self._last_job: try: @@ -777,6 +781,9 @@ def finalize_session(self): self._sa.finalize_session() self._reset_sa() + def abandon_session(self): + self._reset_sa() + def transmit_input(self, text): if not text: self._pipe_from_controller.close() diff --git a/checkbox-ng/plainbox/impl/session/test_assistant.py b/checkbox-ng/plainbox/impl/session/test_assistant.py index 36db109a3c..049ab8cf0a 100644 --- a/checkbox-ng/plainbox/impl/session/test_assistant.py +++ b/checkbox-ng/plainbox/impl/session/test_assistant.py @@ -129,3 +129,32 @@ def test_finalize_session_bootstrapping( self.assertNotIn( SessionMetaData.FLAG_BOOTSTRAPPING, self_mock._metadata.flags ) + + @mock.patch("plainbox.impl.session.assistant.WellKnownDirsHelper") + def test_delete_sessions(self, mock_well_known_dirs_helper, _): + wkdh = mock_well_known_dirs_helper + + mock_storage_deleted = mock.MagicMock() + mock_storage_deleted.id = 1 + + mock_storage_not_deleted = mock.MagicMock() + mock_storage_not_deleted.id = 2 + + wkdh.get_storage_list.return_value = [ + mock_storage_deleted, + mock_storage_not_deleted, + ] + + SessionAssistant.delete_sessions(mock.MagicMock(), [1]) + + self.assertTrue(mock_storage_deleted.remove.called) + self.assertFalse(mock_storage_not_deleted.remove.called) + + def test_note_metadata_starting_job(self, _): + self_mock = mock.MagicMock() + + SessionAssistant.note_metadata_starting_job( + self_mock, {"id": 123}, mock.MagicMock() + ) + + self.assertTrue(self_mock._manager.checkpoint.called) diff --git a/checkbox-ng/plainbox/impl/session/test_remote_assistant.py b/checkbox-ng/plainbox/impl/session/test_remote_assistant.py index 760eeaa054..e1a0ccdf6c 100644 --- a/checkbox-ng/plainbox/impl/session/test_remote_assistant.py +++ b/checkbox-ng/plainbox/impl/session/test_remote_assistant.py @@ -160,7 +160,13 @@ def test_resume_by_id_with_result_file_ok(self, mock_load_configs): 
mock__.side_effect = lambda x: x with mock.patch("os.path.exists", os_path_exists_mock): with mock.patch( - "builtins.open", mock.mock_open(read_data="pass") + "builtins.open", + mock.mock_open( + read_data="""{ + "outcome" : "pass", + "comments" : "Outcome loaded from file" + }""" + ), ): os_path_exists_mock.return_value = True remote_assistant.RemoteSessionAssistant.resume_by_id(rsa) @@ -168,7 +174,47 @@ def test_resume_by_id_with_result_file_ok(self, mock_load_configs): mjr = MemoryJobResult( { "outcome": IJobResult.OUTCOME_PASS, - "comments": "Automatically passed after resuming execution", + "comments": "Outcome loaded from file", + } + ) + rsa._sa.use_job_result.assert_called_with(rsa._last_job, mjr, True) + + @mock.patch("plainbox.impl.session.remote_assistant.load_configs") + def test_resume_by_id_with_result_file_garbage_outcome( + self, mock_load_configs + ): + rsa = mock.Mock() + resumable_session = mock.Mock() + resumable_session.id = "session_id" + rsa._sa.get_resumable_sessions.return_value = [resumable_session] + rsa.get_rerun_candidates.return_value = [] + rsa._state = remote_assistant.Idle + + mock_meta = mock.Mock() + mock_meta.app_blob = b'{"launcher": "", "testplan_id": "tp_id"}' + + rsa._sa.resume_session.return_value = mock_meta + os_path_exists_mock = mock.Mock() + + with mock.patch("plainbox.impl.session.remote_assistant._") as mock__: + mock__.side_effect = lambda x: x + with mock.patch("os.path.exists", os_path_exists_mock): + with mock.patch( + "builtins.open", + mock.mock_open( + read_data="""{ + "outcome" : "unknown_value_for_outcome", + "comments" : "Outcome loaded from file" + }""" + ), + ): + os_path_exists_mock.return_value = True + remote_assistant.RemoteSessionAssistant.resume_by_id(rsa) + + mjr = MemoryJobResult( + { + "outcome": IJobResult.OUTCOME_PASS, + "comments": "Outcome loaded from file", } ) rsa._sa.use_job_result.assert_called_with(rsa._last_job, mjr, True) @@ -281,8 +327,7 @@ def test_remember_users_response_quit(self): 
self_mock, "quit" ) - self.assertEqual(self_mock._state, remote_assistant.Idle) - self.assertTrue(self_mock.finalize_session.called) + self.assertTrue(self_mock.abandon_session.called) def test_remember_users_response_rollback(self): self_mock = mock.MagicMock() @@ -304,6 +349,31 @@ def test_remember_users_response_run(self): self.assertEqual(self_mock._state, remote_assistant.Running) + def test_note_metadata_starting_job(self): + self_mock = mock.MagicMock() + remote_assistant.RemoteSessionAssistant.note_metadata_starting_job( + self_mock, mock.MagicMock(), mock.MagicMock() + ) + self.assertTrue(self_mock._sa.note_metadata_starting_job.called) + + def test_abandon_session(self): + self_mock = mock.MagicMock() + remote_assistant.RemoteSessionAssistant.abandon_session(self_mock) + self.assertTrue(self_mock._reset_sa.called) + + def test_delete_sessions(self): + self_mock = mock.MagicMock() + remote_assistant.RemoteSessionAssistant.delete_sessions(self_mock, []) + self.assertTrue(self_mock._sa.delete_sessions.called) + + def test_get_resumable_sessions(self): + self_mock = mock.MagicMock() + remote_assistant.RemoteSessionAssistant.get_resumable_sessions( + self_mock + ) + self.assertTrue(self_mock._sa.get_resumable_sessions.called) + + class RemoteAssistantFinishJobTests(TestCase): def setUp(self): self.rsa = mock.MagicMock() diff --git a/metabox/metabox/metabox-provider/units/cert-blocker-tps.pxu b/metabox/metabox/metabox-provider/units/cert-blocker-tps.pxu index e50c7fd3d9..a80da6f0b6 100644 --- a/metabox/metabox/metabox-provider/units/cert-blocker-tps.pxu +++ b/metabox/metabox/metabox-provider/units/cert-blocker-tps.pxu @@ -26,10 +26,10 @@ include: id: cert-blocker-manual-resume unit: test plan -_name: Manual cert-blocker test +_name: Manual cert-blocker interact test _description: Test that a job marked as cert-blocker cannot be skipped without a comment when resuming a session include: stub/split-fields/manual certification-status=blocker - 
stub/split-fields/user-interact + stub/split-fields/user-interact certification-status=blocker From f7f83b0ea3e87b19d6083ab2ddaa936f1e03ad52 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Tue, 20 Feb 2024 14:15:33 +0100 Subject: [PATCH 039/108] Always escape outdata in metabox output storage (infra) (#1001) * Always escape outdata in allout * Remove pointless utf8 sequence from test > Hi past me :) --- metabox/metabox/core/lxd_execute.py | 2 +- metabox/metabox/scenarios/basic/run-invocation.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/metabox/metabox/core/lxd_execute.py b/metabox/metabox/core/lxd_execute.py index d861f2d60d..0c333b137c 100644 --- a/metabox/metabox/core/lxd_execute.py +++ b/metabox/metabox/core/lxd_execute.py @@ -55,8 +55,8 @@ def received_message(self, message): self.close() self._connection_closed = True message_data_str = message.data.decode("utf-8", errors="ignore") + raw_msg = message_data_str = self.ansi_escape.sub("", message_data_str) if self.verbose: - raw_msg = self.ansi_escape.sub("", message_data_str) logger.trace(raw_msg.rstrip()) with self.stdout_lock: self.stdout_data += message_data_str diff --git a/metabox/metabox/scenarios/basic/run-invocation.py b/metabox/metabox/scenarios/basic/run-invocation.py index 76747aab27..8cc4e913d1 100644 --- a/metabox/metabox/scenarios/basic/run-invocation.py +++ b/metabox/metabox/scenarios/basic/run-invocation.py @@ -76,7 +76,7 @@ class RunManualplan(Scenario): Expect("Pick an action"), Send("p" + keys.KEY_ENTER), Expect( - " ☑ : A simple user interaction and verification job" + "A simple user interaction and verification job" ), ] From 9499564bb5eefe77108707079615e86b07a36ebe Mon Sep 17 00:00:00 2001 From: kissiel Date: Tue, 20 Feb 2024 14:51:37 +0100 Subject: [PATCH 040/108] Remove $SNAP_DATA refs from checkbox-support UT (BugFix) (#1002) remove $SNAP_DATA refs from checkbox-support UT --- .../checkbox_support/snap_utils/tests/test_config.py | 9 ++++++--- 1 file changed, 
6 insertions(+), 3 deletions(-) diff --git a/checkbox-support/checkbox_support/snap_utils/tests/test_config.py b/checkbox-support/checkbox_support/snap_utils/tests/test_config.py index acccffa472..2588dd793c 100644 --- a/checkbox-support/checkbox_support/snap_utils/tests/test_config.py +++ b/checkbox-support/checkbox_support/snap_utils/tests/test_config.py @@ -124,6 +124,7 @@ def test_empty_on_missing(self, mock_run, mock_write, mock_conf_set): self.assertFalse(mock_write.called) self.assertFalse(mock_run.called) + @patch.dict('os.environ', {'SNAP_DATA': 'SNAP_DATA_VALUE'}) @patch('checkbox_support.snap_utils.config.get_configuration_set') @patch('subprocess.check_output') @patch('subprocess.run') @@ -144,7 +145,7 @@ def test_one_value(self, mock_run, mock_subproc, mock_conf_set): m = mock_open() with patch('builtins.open', m): refresh_configuration() - m.assert_called_with("$SNAP_DATA/checkbox.conf", "wt") + m.assert_called_with("SNAP_DATA_VALUE/checkbox.conf", "wt") m.return_value.write.assert_has_calls([ call('[environment]\n'), call('FOO = bar\n'), @@ -153,6 +154,7 @@ def test_one_value(self, mock_run, mock_subproc, mock_conf_set): self.assertTrue(mock_conf_set.called) mock_run.assert_called_once_with(['snapctl', 'set', 'conf.foo=bar']) + @patch.dict('os.environ', {'SNAP_DATA': 'SNAP_DATA_VALUE'}) @patch('checkbox_support.snap_utils.config.get_configuration_set') @patch('subprocess.check_output') @patch('subprocess.run') @@ -180,7 +182,7 @@ def test_one_value_overriden_by_config( m = mock_open() with patch('builtins.open', m): refresh_configuration() - m.assert_called_with("$SNAP_DATA/checkbox.conf", "wt") + m.assert_called_with("SNAP_DATA_VALUE/checkbox.conf", "wt") m.return_value.write.assert_has_calls([ call('[environment]\n'), call('FOO = bar\n'), @@ -188,6 +190,7 @@ def test_one_value_overriden_by_config( ]) mock_run.assert_called_once_with(['snapctl', 'set', 'conf.foo=bar']) + @patch.dict('os.environ', {'SNAP_DATA': 'SNAP_DATA_VALUE'}) 
@patch('checkbox_support.snap_utils.config.get_configuration_set') @patch('subprocess.check_output') @patch('subprocess.run') @@ -216,7 +219,7 @@ def test_one_new_one_existing( m = mock_open() with patch('builtins.open', m): refresh_configuration() - m.assert_called_with("$SNAP_DATA/checkbox.conf", "wt") + m.assert_called_with("SNAP_DATA_VALUE/checkbox.conf", "wt") m.return_value.write.assert_has_calls([ call('[environment]\n'), call('BIZ = baz\n'), From 4a6c730472ea372dc1692ab0f864293933361864 Mon Sep 17 00:00:00 2001 From: kissiel Date: Tue, 20 Feb 2024 20:41:13 +0100 Subject: [PATCH 041/108] ignore `dm` devices that serve only ubuntu-* partitions (BugFix) (#1003) ignore `dm` devices that serve only ubuntu-* partitions --- .../parsers/tests/test_udevadm.py | 12 ++++ .../two_dms_one_with_ubuntu_save.txt | 68 +++++++++++++++++++ .../checkbox_support/parsers/udevadm.py | 10 +++ 3 files changed, 90 insertions(+) create mode 100644 checkbox-support/checkbox_support/parsers/tests/udevadm_data/two_dms_one_with_ubuntu_save.txt diff --git a/checkbox-support/checkbox_support/parsers/tests/test_udevadm.py b/checkbox-support/checkbox_support/parsers/tests/test_udevadm.py index 40b24c6839..9ddb304e7e 100644 --- a/checkbox-support/checkbox_support/parsers/tests/test_udevadm.py +++ b/checkbox-support/checkbox_support/parsers/tests/test_udevadm.py @@ -1057,6 +1057,18 @@ def test_CRYPTO_FDE_UC20(self): self.assertEqual(len(devices), 93) self.assertEqual(self.count(devices, "PARTITION"), 1) + def test_two_dms_one_with_ubutnu_save(self): + """ + This test checks that udevadm parser properly handles the case + where two devices are present in the same udevadm output, but one + of them is a for recovery purposes and shouldn't be considered a + real device. See: + https://github.com/canonical/checkbox/issues/980 + For details. 
+ """ + devices = self.parse("two_dms_one_with_ubuntu_save") + self.assertEqual(len(devices), 1) + def verify_devices(self, devices, expected_device_list): """ Verify we have the expected quantity of each device given in the list, diff --git a/checkbox-support/checkbox_support/parsers/tests/udevadm_data/two_dms_one_with_ubuntu_save.txt b/checkbox-support/checkbox_support/parsers/tests/udevadm_data/two_dms_one_with_ubuntu_save.txt new file mode 100644 index 0000000000..3e41885ac5 --- /dev/null +++ b/checkbox-support/checkbox_support/parsers/tests/udevadm_data/two_dms_one_with_ubuntu_save.txt @@ -0,0 +1,68 @@ + +P: /devices/virtual/block/dm-0 +N: dm-0 +L: 0 +S: disk/by-uuid/8f828a83-40da-4d3b-b823-b3b646a3bcd8 +S: disk/by-id/dm-uuid-CRYPT-LUKS2-839bc7f700d7450b89492ffb23366778-ubuntu-data-150732d6-cce3-4631-a568-024c719c37a8 +S: disk/by-id/dm-name-ubuntu-data-150732d6-cce3-4631-a568-024c719c37a8 +S: disk/by-label/ubuntu-data +S: mapper/ubuntu-data-150732d6-cce3-4631-a568-024c719c37a8 +E: DEVPATH=/devices/virtual/block/dm-0 +E: SUBSYSTEM=block +E: DEVNAME=/dev/dm-0 +E: DEVTYPE=disk +E: DISKSEQ=10 +E: MAJOR=253 +E: MINOR=0 +E: USEC_INITIALIZED=6678800 +E: DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1 +E: DM_UDEV_PRIMARY_SOURCE_FLAG=1 +E: DM_UDEV_RULES=1 +E: DM_UDEV_RULES_VSN=2 +E: DM_ACTIVATION=1 +E: DM_NAME=ubuntu-data-150732d6-cce3-4631-a568-024c719c37a8 +E: DM_UUID=CRYPT-LUKS2-839bc7f700d7450b89492ffb23366778-ubuntu-data-150732d6-cce3-4631-a568-024c719c37a8 +E: DM_SUSPENDED=0 +E: ID_FS_LABEL=ubuntu-data +E: ID_FS_LABEL_ENC=ubuntu-data +E: ID_FS_UUID=8f828a83-40da-4d3b-b823-b3b646a3bcd8 +E: ID_FS_UUID_ENC=8f828a83-40da-4d3b-b823-b3b646a3bcd8 +E: ID_FS_VERSION=1.0 +E: ID_FS_TYPE=ext4 +E: ID_FS_USAGE=filesystem +E: DEVLINKS=/dev/disk/by-uuid/8f828a83-40da-4d3b-b823-b3b646a3bcd8 /dev/disk/by-id/dm-uuid-CRYPT-LUKS2-839bc7f700d7450b89492ffb23366778-ubuntu-data-150732d6-cce3-4631-a568-024c719c37a8 /dev/disk/by-id/dm-name-ubuntu-data-150732d6-cce3-4631-a568-024c719c37a8 
/dev/disk/by-label/ubuntu-data /dev/mapper/ubuntu-data-150732d6-cce3-4631-a568-024c719c37a8 +E: TAGS=:systemd: + +P: /devices/virtual/block/dm-1 +N: dm-1 +L: 0 +S: disk/by-uuid/d9a828fe-77f2-44df-81c9-33fbe8f8e5c7 +S: disk/by-id/dm-name-ubuntu-save-b2d747ba-f8d9-4381-849e-46fa1cc10d28 +S: disk/by-label/ubuntu-save +S: disk/by-id/dm-uuid-CRYPT-LUKS2-3ea3e2b191824291847777eedecee331-ubuntu-save-b2d747ba-f8d9-4381-849e-46fa1cc10d28 +S: mapper/ubuntu-save-b2d747ba-f8d9-4381-849e-46fa1cc10d28 +E: DEVPATH=/devices/virtual/block/dm-1 +E: SUBSYSTEM=block +E: DEVNAME=/dev/dm-1 +E: DEVTYPE=disk +E: DISKSEQ=11 +E: MAJOR=253 +E: MINOR=1 +E: USEC_INITIALIZED=6769859 +E: DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1 +E: DM_UDEV_PRIMARY_SOURCE_FLAG=1 +E: DM_UDEV_RULES=1 +E: DM_UDEV_RULES_VSN=2 +E: DM_ACTIVATION=1 +E: DM_NAME=ubuntu-save-b2d747ba-f8d9-4381-849e-46fa1cc10d28 +E: DM_UUID=CRYPT-LUKS2-3ea3e2b191824291847777eedecee331-ubuntu-save-b2d747ba-f8d9-4381-849e-46fa1cc10d28 +E: DM_SUSPENDED=0 +E: ID_FS_LABEL=ubuntu-save +E: ID_FS_LABEL_ENC=ubuntu-save +E: ID_FS_UUID=d9a828fe-77f2-44df-81c9-33fbe8f8e5c7 +E: ID_FS_UUID_ENC=d9a828fe-77f2-44df-81c9-33fbe8f8e5c7 +E: ID_FS_VERSION=1.0 +E: ID_FS_TYPE=ext4 +E: ID_FS_USAGE=filesystem +E: DEVLINKS=/dev/disk/by-uuid/d9a828fe-77f2-44df-81c9-33fbe8f8e5c7 /dev/disk/by-id/dm-name-ubuntu-save-b2d747ba-f8d9-4381-849e-46fa1cc10d28 /dev/disk/by-label/ubuntu-save /dev/disk/by-id/dm-uuid-CRYPT-LUKS2-3ea3e2b191824291847777eedecee331-ubuntu-save-b2d747ba-f8d9-4381-849e-46fa1cc10d28 /dev/mapper/ubuntu-save-b2d747ba-f8d9-4381-849e-46fa1cc10d28 +E: TAGS=:systemd: \ No newline at end of file diff --git a/checkbox-support/checkbox_support/parsers/udevadm.py b/checkbox-support/checkbox_support/parsers/udevadm.py index 158f965e3e..bba105446c 100644 --- a/checkbox-support/checkbox_support/parsers/udevadm.py +++ b/checkbox-support/checkbox_support/parsers/udevadm.py @@ -1138,6 +1138,16 @@ def _ignoreDevice(self, device): if "ID_FS_USAGE" in device._environment: if 
device._environment["ID_FS_USAGE"] != 'filesystem': return True + # Some of the MD devices are there only to host "service" + # partitions that should not be considered disks when running + # Checkbox. For more details see: + # https://github.com/canonical/checkbox/issues/980 + # the following partition names, if found on the dm-* device + # will make the dm-* device not to be reported as a disk + IGNORED_PARTITIONS = [ + "ubuntu-save", "ubuntu-boot", "ubuntu-seed"] + if device._environment.get("ID_FS_LABEL") in IGNORED_PARTITIONS: + return True return False # Keep /dev/md* devices (Multiple Disks aka Software RAID) From 5680abc1bb9df060149f0b9c6cb53b54cafea714 Mon Sep 17 00:00:00 2001 From: stanley31huang Date: Wed, 21 Feb 2024 15:36:10 +0800 Subject: [PATCH 042/108] Refactor LED tests (BugFix) (#905) * Fix: refactor the LED tests scripts refactor the LED tests scripts from Shell scripts to Python scripts. * Add: adding dump gpio information function adding a function to dump all GPIO information * Fix: refactor scripts and unittest scripts update script and unittest scripts * Fix: corrected the gpiochip number corrected the gpiochip number * fix: fixed shell check error fixed shell check error * Fix: added unit tests added unit tests * Update providers/base/bin/gpio_control_test.py Co-authored-by: Pierre Equoy * Update providers/base/bin/gpio_control_test.py Co-authored-by: Pierre Equoy * updated scripts and fixed typo - updated the gpio_controller_test scripts to parse LED resource - fixed typo in led resource job * Update providers/base/tests/test_gpio_control_test.py Co-authored-by: Pierre Equoy * Update providers/base/tests/test_gpio_control_test.py Co-authored-by: Pierre Equoy * Update providers/base/tests/test_gpio_control_test.py Co-authored-by: Pierre Equoy * Update providers/base/tests/test_gpio_control_test.py Co-authored-by: Pierre Equoy * fixed missing module issues import StringIO and redirect_stdout libraries --------- Co-authored-by: Pierre Equoy 
--- providers/base/bin/gpio_control_test.py | 313 +++++++++++++++ providers/base/bin/led_control_test.py | 194 ++++++++++ .../base/tests/test_gpio_control_test.py | 356 ++++++++++++++++++ providers/base/tests/test_led_control_test.py | 154 ++++++++ providers/base/units/led/jobs.pxu | 42 ++- providers/base/units/led/test-plan.pxu | 12 +- 6 files changed, 1068 insertions(+), 3 deletions(-) create mode 100755 providers/base/bin/gpio_control_test.py create mode 100755 providers/base/bin/led_control_test.py create mode 100644 providers/base/tests/test_gpio_control_test.py create mode 100644 providers/base/tests/test_led_control_test.py diff --git a/providers/base/bin/gpio_control_test.py b/providers/base/bin/gpio_control_test.py new file mode 100755 index 0000000000..ab668810b5 --- /dev/null +++ b/providers/base/bin/gpio_control_test.py @@ -0,0 +1,313 @@ +#!/usr/bin/env python3 +import re +import sys +import time +import logging +import argparse +from pathlib import Path +from datetime import datetime + + +class GPIOController: + + GPIO_ROOT_PATH = "/sys/class/gpio" + GPIO_EXPORT_PATH = "{}/export".format(GPIO_ROOT_PATH) + GPIO_UNEXPORT_PATH = "{}/unexport".format(GPIO_ROOT_PATH) + + def __init__(self, gpiochip: str, gpiopin: str, + direction: int, need_export: bool): + if gpiochip.isnumeric() is False or gpiopin.isnumeric() is False: + raise ValueError("Invalid GPIO chip or GPIO pin") + + self._gpio_root_node = Path(self.GPIO_ROOT_PATH) + self._gpiochip_mapping = self.get_gpiochip_mapping() + + if gpiochip not in self._gpiochip_mapping.keys(): + raise KeyError("GPIO chip number {} is incorrect".format(gpiochip)) + + self.gpio_chip_node = self._gpio_root_node.joinpath( + "gpiochip{}".format( + self._gpiochip_mapping.get(gpiochip))) + self.gpio_node = self.value_node = None + self.gpiochip_info = {"base": None, "ngpio": None, "offset": gpiopin} + self._direction = direction + self._need_export = need_export + self.initial_state = { + "value": None, "direction": None, 
"number": None + } + + def __enter__(self): + self.setup() + return self + + def __exit__(self, type, value, traceback): + self.teardown() + + def check_gpio_offset(self, pin: str, ngpio: str): + if int(pin) == 0: + raise ValueError("") + + if int(pin) > int(ngpio): + raise IndexError( + "GPIO pin '{}' greater than ngpio value '{}'".format( + pin, ngpio)) + + def get_gpiochip_mapping(self): + mapping = {} + nodes = sorted( + self._gpio_root_node.glob("gpiochip*/device/gpiochip*")) + for node in nodes: + match = re.search( + r"/sys/class/gpio/gpiochip([0-9]+)/device/gpiochip([0-9]+)", + str(node)) + if match: + mapping.update({match.groups()[1]: match.groups()[0]}) + return mapping + + def setup(self): + logging.debug("setup action for GPIO testing") + for key in ["base", "ngpio"]: + with self.gpio_chip_node.joinpath(key) as gpio_node: + if self._node_exists(gpio_node) is False: + raise FileNotFoundError( + "{} file not exists".format(str(gpio_node))) + self.gpiochip_info[key] = self._read_node(gpio_node) + + self.check_gpio_offset( + self.gpiochip_info["offset"], self.gpiochip_info["ngpio"] + ) + + self.initial_state["number"] = str(( + int(self.gpiochip_info["base"]) + + int(self.gpiochip_info["offset"]) - 1 + )) + self.gpio_node = self._gpio_root_node.joinpath( + "gpio{}".format(self.initial_state["number"])) + + # Export GPIO node if needed + if self._need_export: + self._export(self.initial_state["number"]) + time.sleep(1) + + if self._node_exists(self.gpio_node) is False: + raise FileNotFoundError( + "{} file not exists".format(str(self.gpio_node))) + + # Store the initial state for GPIO + self.initial_state["value"] = self.value + self.initial_state["direction"] = self.direction + + # Configure the GPIO direction + self.direction = self._direction + + def teardown(self): + logging.debug("teardown action for LED testing") + self.value = self.initial_state["value"] + self.direction = self.initial_state["direction"] + if self._need_export: + 
self._unexport(self.initial_state["number"]) + + def _node_exists(self, node: Path): + if node.exists() is False: + raise FileNotFoundError("{} file not exists".format(str(node))) + + def _read_node(self, node: Path): + self._node_exists(node) + return node.read_text().strip("\n") + + def _write_node(self, node: Path, value: str, check=True): + + self._node_exists(node) + node.write_text(value) + if check and self._read_node(node) != value: + raise ValueError( + "Unable to change the value of {} file".format(str(node))) + + def _export(self, gpio_number: str): + logging.debug("export GPIO node %s", gpio_number) + with Path(self.GPIO_EXPORT_PATH) as gpio_node: + self._write_node(gpio_node, gpio_number, False) + + def _unexport(self, gpio_number: str): + logging.debug("unexport GPIO node %s", gpio_number) + with Path(self.GPIO_UNEXPORT_PATH) as gpio_node: + self._write_node(gpio_node, gpio_number, False) + + @property + def direction(self): + with self.gpio_node.joinpath("direction") as gpio_node: + return self._read_node(gpio_node) + + @direction.setter + def direction(self, value: str): + if value not in ["in", "out"]: + raise ValueError( + "The {} is not allowed for direction".format(value)) + + with self.gpio_node.joinpath("direction") as gpio_node: + logging.debug("set direction to {} for {}".format( + value, gpio_node.name + )) + self._write_node(gpio_node, value) + + @property + def value(self): + with self.gpio_node.joinpath("value") as gpio_node: + return self._read_node(gpio_node) + + @value.setter + def value(self, value: str): + if value not in ["1", "0"]: + raise ValueError( + "The {} is not allowed for value".format(value)) + + with self.gpio_node.joinpath("value") as gpio_node: + logging.debug("set value to {} for {}".format( + value, gpio_node.name + )) + self._write_node(gpio_node, value) + + def on(self): + logging.debug("turn on GPIO") + self.value = "1" + + def off(self): + logging.debug("turn off GPIO") + self.value = "0" + + def blinking(self, 
duration=10, interval=1): + logging.debug( + "set GPIO{} LED blinking".format(self.initial_state["number"])) + start_time = datetime.now() + while (datetime.now() - start_time).total_seconds() <= duration: + self.on() + time.sleep(interval) + self.off() + time.sleep(interval) + + +def blinking_test(args): + + with GPIOController(args.gpio_chip, args.gpio_pin, + "out", args.need_export) as led_controller: + logging.info(("# Set the {} LED blinking around {} seconds " + "with {} seconds blink interval").format( + args.name, args.duration, args.interval)) + led_controller.blinking(args.duration, args.interval) + + +def dump_gpiochip(args): + gpio_debug_path = "/sys/kernel/debug/gpio" + + gpio_debug = Path(gpio_debug_path) + if gpio_debug.exists(): + print(gpio_debug.read_text()) + else: + raise FileNotFoundError("{} file not exists".format(str(gpio_debug))) + + +def leds_resource(args): + output = "" + resource_text = "name: {}\nchip_number: {}\nport: {}\n\n" + for led in args.mapping.split(): + name, chip_number, port = led.split(":") + output += resource_text.format(name, chip_number, port) + print(output) + + +def register_arguments(): + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description='GPIO Control Tests') + parser.add_argument( + "--debug", + action="store_true", + help="Turn on debug level output for extra info during test run.", + ) + + sub_parsers = parser.add_subparsers(help="GPIO test type", + dest="test_func") + sub_parsers.required = True + + gpio_led_parser = sub_parsers.add_parser("led") + gpio_led_parser.add_argument( + "-n", "--name", + required=True, + type=str + ) + gpio_led_parser.add_argument( + "-d", "--duration", + type=int, + default=5 + ) + gpio_led_parser.add_argument( + "-i", "--interval", + type=int, + default=0.5 + ) + gpio_led_parser.add_argument( + "--gpio-chip", + type=str, + required=True + ) + gpio_led_parser.add_argument( + "--gpio-pin", + type=str, + required=True + ) + 
gpio_led_parser.add_argument( + "--need-export", + action="store_true", + default=False + ) + gpio_led_parser.set_defaults(test_func=blinking_test) + + gpio_dump_parser = sub_parsers.add_parser("dump") + gpio_dump_parser.set_defaults(test_func=dump_gpiochip) + + gpio_args_parser = sub_parsers.add_parser("led-resource") + gpio_args_parser.set_defaults(test_func=leds_resource) + gpio_args_parser.add_argument( + "mapping", + help=("Usage of parameter: GPIO_CONTROLLER_LEDS=" + "{name1}:{controller1}:{port1} " + "{name2}:{controller1}:{port2} ..."), + ) + + args = parser.parse_args() + return args + + +if __name__ == "__main__": + + root_logger = logging.getLogger() + root_logger.setLevel(logging.INFO) + logger_format = "%(asctime)s %(levelname)-8s %(message)s" + date_format = "%Y-%m-%d %H:%M:%S" + + # Log DEBUG and INFO to stdout, others to stderr + stdout_handler = logging.StreamHandler(sys.stdout) + stdout_handler.setFormatter(logging.Formatter(logger_format, date_format)) + + stderr_handler = logging.StreamHandler(sys.stderr) + stderr_handler.setFormatter(logging.Formatter(logger_format, date_format)) + + stdout_handler.setLevel(logging.DEBUG) + stderr_handler.setLevel(logging.WARNING) + + # Add a filter to the stdout handler to limit log records to + # INFO level and below + stdout_handler.addFilter(lambda record: record.levelno <= logging.INFO) + + root_logger.addHandler(stderr_handler) + root_logger.addHandler(stdout_handler) + + args = register_arguments() + + if args.debug: + root_logger.setLevel(logging.DEBUG) + + try: + args.test_func(args) + except Exception as err: + logging.error(err) diff --git a/providers/base/bin/led_control_test.py b/providers/base/bin/led_control_test.py new file mode 100755 index 0000000000..858a2865a8 --- /dev/null +++ b/providers/base/bin/led_control_test.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python3 + +import sys +import re +import time +import logging +import argparse +from pathlib import Path +from datetime import datetime + + 
+class SysFsLEDController(): + + SysFsLEDPath = "/sys/class/leds" + + def __init__(self, name, on_value="0", off_value="0"): + self.led_name = name + self.led_node = Path(self.SysFsLEDPath).joinpath(name) + self.brightness_node = self.led_node.joinpath("brightness") + self.max_brightness_node = self.led_node.joinpath("max_brightness") + self.trigger_node = self.led_node.joinpath("trigger") + self.initial_state = {"trigger": None, "brightness": None} + self._on_value = on_value + self._off_value = off_value + + def __enter__(self): + self.setup() + return self + + def __exit__(self, type, value, traceback): + self.teardown() + + def setup(self): + logging.debug("setup action for LED testing") + + if int(self._on_value) > int(self.max_brightness): + raise ValueError("brightness value greater than max brightness") + if self._on_value == "0": + self._on_value = self.max_brightness + + # Get the initial value of trigger and brightness + self._get_initial_state() + # Set the trigger type to none + self.trigger = "none" + self.off() + + def teardown(self): + logging.debug("teardown action for LED testing") + initial_state = self.initial_state + self.brightness = initial_state["brightness"] + self.trigger = initial_state["trigger"] + + def _node_exists(self, node): + if node.exists() is False: + raise FileNotFoundError("{} file not exists".format(str(node))) + + def _read_node(self, node): + self._node_exists(node) + return node.read_text().strip("\n") + + def _write_node(self, node, value, check=True): + + self._node_exists(node) + node.write_text(value) + if check and self._read_node(node) != value: + raise ValueError( + "Unable to change the value of {} file".format(str(node))) + + @property + def brightness(self): + return self._read_node(self.brightness_node) + + @brightness.setter + def brightness(self, value): + logging.debug("set brightness to %s for %s LED", value, self.led_name) + self._write_node(self.brightness_node, value) + + @property + def 
max_brightness(self): + return self._read_node(self.max_brightness_node) + + @property + def trigger(self): + return self._read_node(self.trigger_node) + + @trigger.setter + def trigger(self, value): + logging.debug("set trigger action to {} for {} LED".format( + value, self.led_name + )) + # The read value from trigger node is all supported trigger type + # So skip the check + self._write_node(self.trigger_node, value, False) + + def _get_initial_state(self): + match = re.search(r"\[([\w-]+)\]", self.trigger) + if match: + self.initial_state["trigger"] = match.groups()[0] + + self.initial_state["brightness"] = self.brightness + + def on(self): + logging.debug("turn on {} LED".format(self.led_name)) + self.brightness = self._on_value + + def off(self): + logging.debug("turn off {} LED".format(self.led_name)) + self.brightness = self._off_value + + def blinking(self, duration=10, interval=1): + logging.debug("set {} LED blinking".format(self.led_name)) + start_time = datetime.now() + while (datetime.now() - start_time).total_seconds() <= duration: + self.on() + time.sleep(interval) + self.off() + time.sleep(interval) + + +def register_arguments(): + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description='LED Tests') + parser.add_argument( + "-n", "--name", + required=True, + type=str + ) + parser.add_argument( + "-d", "--duration", + type=int, + default=5 + ) + parser.add_argument( + "-i", "--interval", + type=int, + default=0.5 + ) + parser.add_argument( + "--on-value", + type=int, + default="0" + ) + parser.add_argument( + "--off-value", + type=int, + default="0" + ) + parser.add_argument( + "--debug", + action="store_true", + help="Turn on debug level output for extra info during test run.", + ) + + args = parser.parse_args() + return args + + +if __name__ == "__main__": + + root_logger = logging.getLogger() + root_logger.setLevel(logging.INFO) + logger_format = "%(asctime)s %(levelname)-8s %(message)s" + 
date_format = "%Y-%m-%d %H:%M:%S" + + # Log DEBUG and INFO to stdout, others to stderr + stdout_handler = logging.StreamHandler(sys.stdout) + stdout_handler.setFormatter(logging.Formatter(logger_format, date_format)) + + stderr_handler = logging.StreamHandler(sys.stderr) + stderr_handler.setFormatter(logging.Formatter(logger_format, date_format)) + + stdout_handler.setLevel(logging.DEBUG) + stderr_handler.setLevel(logging.WARNING) + + # Add a filter to the stdout handler to limit log records to + # INFO level and below + stdout_handler.addFilter(lambda record: record.levelno <= logging.INFO) + + root_logger.addHandler(stderr_handler) + root_logger.addHandler(stdout_handler) + + args = register_arguments() + if args.debug: + root_logger.setLevel(logging.DEBUG) + + logging.info("# Start LED testing") + logging.info(("# Set the %s LED blinking around %d seconds" + "with %f seconds blink interval"), + args.name, args.duration, args.interval) + + with SysFsLEDController(args.name, str(args.on_value), + str(args.off_value)) as led_controller: + led_controller.blinking(args.duration, args.interval) diff --git a/providers/base/tests/test_gpio_control_test.py b/providers/base/tests/test_gpio_control_test.py new file mode 100644 index 0000000000..917abb0d2d --- /dev/null +++ b/providers/base/tests/test_gpio_control_test.py @@ -0,0 +1,356 @@ +import unittest +import sys +import argparse +from unittest.mock import patch, Mock +from pathlib import PosixPath, Path +from io import StringIO +from contextlib import redirect_stdout +from gpio_control_test import GPIOController +from gpio_control_test import blinking_test +from gpio_control_test import dump_gpiochip +from gpio_control_test import leds_resource +from gpio_control_test import register_arguments + + +class TestGPIOController(unittest.TestCase): + + @patch("gpio_control_test.GPIOController.get_gpiochip_mapping") + @patch("gpio_control_test.GPIOController._unexport") + @patch("gpio_control_test.GPIOController._export") + 
@patch("gpio_control_test.GPIOController._write_node") + @patch("gpio_control_test.GPIOController._read_node") + @patch("gpio_control_test.GPIOController._node_exists") + @patch("gpio_control_test.time.sleep", new=Mock) + def test_initial_gpio_controller_success( + self, mock_path, mock_read, mock_write, + mock_export, mock_unexport, mock_mapping): + mock_path.return_value = True + mock_read.side_effect = ["32", "16", "0", "in"] + mock_mapping.return_value = {"1": "32"} + + with GPIOController("1", "1", "in", True) as gpio_controller: + mock_path.assert_called_with(gpio_controller.gpio_node) + mock_read.assert_called_with( + gpio_controller.gpio_node.joinpath("direction")) + mock_write.assert_called_with( + gpio_controller.gpio_node.joinpath("direction"), "in") + self.assertIsInstance(gpio_controller.gpio_chip_node, PosixPath) + self.assertEqual(gpio_controller.gpio_chip_node.name, "gpiochip32") + self.assertDictEqual( + gpio_controller.gpiochip_info, + {"base": "32", "ngpio": "16", "offset": "1"}) + self.assertDictEqual( + gpio_controller.initial_state, + {"value": "0", "direction": "in", "number": "32"}) + + def test_initial_gpio_controller_with_invalid_gpiochip(self): + with self.assertRaises(ValueError): + with GPIOController("6a", "1", "in", True) as gpio_controller: + gpio_controller.gpio_chip_node + + @patch("gpio_control_test.GPIOController.get_gpiochip_mapping") + def test_initial_gpio_controller_with_notexist_gpiochip(self, + mock_mapping): + mock_mapping.return_value = {"0": "32"} + with self.assertRaises(KeyError): + GPIOController("1", "1", "in", True) + + def test_initial_gpio_controller_with_invalid_gpiopin(self): + with self.assertRaises(ValueError): + with GPIOController("12", "1a", "in", True) as _: + pass + + @patch("gpio_control_test.GPIOController.get_gpiochip_mapping") + @patch("gpio_control_test.GPIOController._read_node") + @patch("gpio_control_test.GPIOController._node_exists") + def test_initial_gpio_controller_with_notexist_gpiopin( + 
self, mock_path, mock_read, mock_mapping): + mock_read.side_effect = ["32", "16", "0", "in"] + mock_mapping.return_value = {"0": "32"} + with self.assertRaises(ValueError): + gpio_conn = GPIOController("0", "0", "in", True) + gpio_conn.setup() + + @patch("gpio_control_test.GPIOController.get_gpiochip_mapping") + @patch("gpio_control_test.GPIOController._unexport") + @patch("gpio_control_test.GPIOController._export") + @patch("gpio_control_test.GPIOController._write_node") + @patch("gpio_control_test.GPIOController._read_node") + @patch("gpio_control_test.GPIOController._node_exists") + def test_initial_gpiopin_exceeds_maximum( + self, mock_path, mock_read, mock_write, + mock_export, mock_unexport, mock_mapping): + mock_path.return_value = True + mock_read.side_effect = ["32", "16", "0", "in"] + mock_mapping.return_value = {"1": "32"} + + with self.assertRaises(IndexError): + with GPIOController("1", "18", "in", True) as gpio_controller: + mock_path.assert_called_with(gpio_controller.gpio_node) + mock_read.assert_called_with( + gpio_controller.gpio_node.joinpath("direction")) + + @patch("pathlib.Path.glob") + def test_get_gpiochip_mapping(self, mock_glob): + expected_data = {"0": "32", "1": "96"} + mock_glob.return_value = [ + "/sys/class/gpio/gpiochip32/device/gpiochip0", + "/sys/class/gpio/gpiochip96/device/gpiochip1" + ] + + gpio_conn = GPIOController("0", "1", "out", True) + self.assertDictEqual(gpio_conn.get_gpiochip_mapping(), + expected_data) + + @patch("gpio_control_test.GPIOController.get_gpiochip_mapping") + @patch("gpio_control_test.GPIOController._read_node") + @patch("gpio_control_test.GPIOController._node_exists") + def test_setup_failed_by_ngpio_not_available( + self, mock_path, mock_read, mock_mapping): + mock_mapping.return_value = {"0": "32"} + mock_path.side_effect = [True, False] + mock_read.return_value = "32" + + gpio_conn = GPIOController("0", "1", "out", False) + with self.assertRaises(FileNotFoundError): + gpio_conn.setup() + + 
@patch("gpio_control_test.GPIOController.get_gpiochip_mapping") + @patch("gpio_control_test.GPIOController._read_node") + @patch("gpio_control_test.GPIOController._node_exists") + def test_setup_failed_gpionode_not_available( + self, mock_path, mock_read, mock_mapping): + mock_mapping.return_value = {"0": "32"} + mock_path.side_effect = [True, True, False] + mock_read.side_effect = ["32", "16"] + + gpio_conn = GPIOController("0", "1", "out", False) + with self.assertRaises(FileNotFoundError): + gpio_conn.setup() + + @patch("gpio_control_test.GPIOController.get_gpiochip_mapping") + @patch("pathlib.Path.exists") + def test_file_not_exists(self, mock_path, mock_mapping): + mock_path.return_value = False + mock_mapping.return_value = {"1": "32"} + gpio_controller = GPIOController("1", "1", "in", True) + with self.assertRaises(FileNotFoundError): + gpio_controller._node_exists(Path("test-fake")) + + @patch("gpio_control_test.GPIOController.get_gpiochip_mapping") + @patch("pathlib.Path.read_text") + @patch("pathlib.Path.exists") + def test_read_file(self, mock_path, mock_read, mock_mapping): + expected_result = "test-string" + mock_path.return_value = True + mock_read.return_value = expected_result + mock_mapping.return_value = {"1": "32"} + gpio_controller = GPIOController("1", "1", "in", True) + self.assertEqual( + expected_result, + gpio_controller._read_node(gpio_controller.gpio_chip_node)) + + @patch("gpio_control_test.GPIOController.get_gpiochip_mapping") + @patch("pathlib.Path.write_text") + @patch("pathlib.Path.read_text") + @patch("pathlib.Path.exists") + def test_write_failed(self, mock_path, mock_read, + mock_write, mock_mapping): + read_value = "33" + write_value = "22" + + mock_path.return_value = True + mock_read.return_value = read_value + mock_mapping.return_value = {"1": "32"} + + with self.assertRaises(ValueError): + gpio_controller = GPIOController("1", "1", "out", True) + gpio_controller._write_node(Path("test-fake"), write_value, True) + + 
@patch("gpio_control_test.GPIOController.get_gpiochip_mapping") + @patch("pathlib.Path.write_text") + @patch("pathlib.Path.read_text") + @patch("pathlib.Path.exists") + def test_write_passed(self, mock_path, + mock_read, mock_write, mock_mapping): + read_value = "33" + write_value = "33" + mock_path.return_value = True + mock_read.return_value = read_value + mock_mapping.return_value = {"1": "32"} + + gpio_controller = GPIOController("1", "1", "out", True) + gpio_controller._write_node(Path("test-fake"), write_value, True) + + mock_path.assert_called_with() + mock_read.assert_called_once_with() + mock_write.assert_called_once_with("33") + + @patch("gpio_control_test.GPIOController.get_gpiochip_mapping") + @patch("gpio_control_test.GPIOController._write_node") + def test_export_gpio_node(self, mock_write, mock_mapping): + mock_mapping.return_value = {"1": "32"} + + gpio_no = "30" + gpio_controller = GPIOController("1", "1", "out", True) + gpio_controller._export(gpio_no) + mock_write.assert_called_once_with( + PosixPath('/sys/class/gpio/export'), gpio_no, False) + + @patch("gpio_control_test.GPIOController.get_gpiochip_mapping") + @patch("gpio_control_test.GPIOController._write_node") + def test_unexport_gpio_node(self, mock_write, mock_mapping): + mock_mapping.return_value = {"1": "32"} + + gpio_no = "30" + gpio_controller = GPIOController("1", "1", "out", True) + gpio_controller._unexport(gpio_no) + mock_write.assert_called_once_with( + PosixPath('/sys/class/gpio/unexport'), gpio_no, False) + + @patch("gpio_control_test.GPIOController.get_gpiochip_mapping") + @patch("gpio_control_test.GPIOController._read_node") + @patch("gpio_control_test.GPIOController._node_exists") + def test_get_gpio_direction(self, mock_path, mock_path_get, mock_mapping): + mock_path.return_value = True + mock_path_get.return_value = "out" + mock_mapping.return_value = {"1": "32"} + + gpio_controller = GPIOController("1", "1", "out", True) + gpio_controller.gpio_node = Path("fake-node") + 
self.assertEqual(gpio_controller.direction, "out") + + @patch("gpio_control_test.GPIOController.get_gpiochip_mapping") + def test_set_gpio_direction_failed(self, mock_mapping): + mock_mapping.return_value = {"1": "32"} + + with self.assertRaises(ValueError): + gpio_controller = GPIOController("1", "1", "out", True) + gpio_controller.direction = "wrong_direction" + + @patch("gpio_control_test.GPIOController.get_gpiochip_mapping") + @patch("gpio_control_test.GPIOController._read_node") + @patch("gpio_control_test.GPIOController._node_exists") + def test_get_gpio_value(self, mock_path, mock_path_get, mock_mapping): + mock_path.return_value = True + mock_path_get.return_value = "1" + mock_mapping.return_value = {"1": "32"} + + gpio_controller = GPIOController("1", "1", "out", True) + gpio_controller.gpio_node = Path("fake-node") + self.assertEqual(gpio_controller.value, "1") + + @patch("gpio_control_test.GPIOController.get_gpiochip_mapping") + def test_set_gpio_value_failed(self, mock_mapping): + mock_mapping.return_value = {"1": "32"} + + with self.assertRaises(ValueError): + gpio_controller = GPIOController("1", "1", "out", True) + gpio_controller.value = "wrong_value" + + @patch("gpio_control_test.GPIOController.get_gpiochip_mapping") + @patch("gpio_control_test.GPIOController.off") + @patch("gpio_control_test.GPIOController.on") + def test_blinking_function(self, mock_on, mock_off, mock_mapping): + mock_mapping.return_value = {"1": "32"} + + gpio_controller = GPIOController("1", "1", "out", True) + gpio_controller.blinking(0.0001, 0.0001) + mock_on.assert_called_with() + mock_off.assert_called_with() + + +class TestMainFunction(unittest.TestCase): + + @patch("gpio_control_test.GPIOController._write_node") + @patch("gpio_control_test.GPIOController._read_node") + @patch("gpio_control_test.GPIOController._node_exists") + @patch("gpio_control_test.GPIOController.get_gpiochip_mapping") + @patch("gpio_control_test.GPIOController.blinking") + def test_blinking_test(self, 
mock_blinking, mock_mapping, + mock_path, mock_read, mock_write): + mock_args = Mock( + return_value=argparse.Namespace( + name="fake-node", duration=5, interval=0.5, + gpio_chip="1", gpio_pin="1", need_export=False)) + mock_mapping.return_value = {"1": "32"} + mock_path.return_value = True + mock_read.side_effect = ["32", "16", "1", "out"] + + blinking_test(mock_args()) + mock_blinking.assert_called_once_with( + mock_args().duration, mock_args().interval) + + @patch("pathlib.Path.read_text") + @patch("pathlib.Path.exists") + def test_dump_gpiochip_test(self, mock_path, mock_read): + mock_path.return_value = True + mock_read.return_value = "mock-string" + + with redirect_stdout(StringIO()) as stdout: + dump_gpiochip(None) + mock_path.assert_called_once_with() + mock_read.assert_called_once_with() + + @patch("pathlib.Path.exists") + def test_dump_gpiochip_test_failed(self, mock_path): + mock_path.return_value = False + + with self.assertRaises(FileNotFoundError) as context: + dump_gpiochip(None) + + self.assertEqual( + str(context.exception), "/sys/kernel/debug/gpio file not exists" + ) + + def test_led_resource(self): + mock_args = Mock( + return_value=argparse.Namespace(mapping="DL14:5:1 DL14:5:2")) + with redirect_stdout(StringIO()) as stdout: + leds_resource(mock_args()) + + def test_led_resource_with_unexpected_format(self): + mock_args = Mock( + return_value=argparse.Namespace(mapping="DL14-5:1")) + + with self.assertRaises(ValueError) as context: + leds_resource(mock_args()) + + self.assertEqual( + str(context.exception), + "not enough values to unpack (expected 3, got 2)" + ) + + +class TestArgumentParser(unittest.TestCase): + + def test_led_parser(self): + sys.argv = [ + "gpio_control_test.py", "--debug", "led", "-n", "fake-led", + "--gpio-chip", "3", "--gpio-pin", "5", "--need-export", + "-d", "30", "-i", "2" + + ] + args = register_arguments() + + self.assertEqual(args.test_func, blinking_test) + self.assertEqual(args.debug, True) + 
self.assertEqual(args.name, "fake-led") + self.assertEqual(args.duration, 30) + self.assertEqual(args.interval, 2) + self.assertEqual(args.gpio_chip, "3") + self.assertEqual(args.gpio_pin, "5") + + def test_dump_parser(self): + sys.argv = ["gpio_control_test.py", "dump"] + args = register_arguments() + + self.assertEqual(args.test_func, dump_gpiochip) + self.assertEqual(args.debug, False) + + def test_led_resource_parser(self): + sys.argv = ["gpio_control_test.py", "led-resource", "DL14:5:1"] + args = register_arguments() + + self.assertEqual(args.test_func, leds_resource) + self.assertEqual(args.debug, False) diff --git a/providers/base/tests/test_led_control_test.py b/providers/base/tests/test_led_control_test.py new file mode 100644 index 0000000000..64054891a7 --- /dev/null +++ b/providers/base/tests/test_led_control_test.py @@ -0,0 +1,154 @@ +import sys +import unittest +from unittest.mock import patch, Mock +from pathlib import PosixPath, Path +from led_control_test import SysFsLEDController +from led_control_test import register_arguments + + +class TestSysFsLEDController(unittest.TestCase): + + def setUp(self): + + self.led_controller = SysFsLEDController("test-fake") + + @patch("led_control_test.SysFsLEDController._read_node") + @patch("led_control_test.SysFsLEDController._node_exists") + def test_setup_failed(self, mock_path, mock_read): + mock_path.return_value = False + mock_read.return_value = "30" + self.led_controller._on_value = "50" + + with self.assertRaises(ValueError): + self.led_controller.setup() + + @patch("led_control_test.SysFsLEDController._write_node") + @patch("led_control_test.SysFsLEDController._read_node") + @patch("led_control_test.SysFsLEDController._node_exists") + def test_setup_passed(self, mock_path, mock_read, mock_write): + mock_path.return_value = True + mock_read.return_value = "30" + + self.led_controller.setup() + self.assertEqual(self.led_controller._on_value, + mock_read.return_value) + + @patch("pathlib.Path.exists") + 
def test_file_not_exists(self, mock_path): + mock_path.return_value = False + with self.assertRaises(FileNotFoundError): + self.led_controller._node_exists(Path("test-fake")) + + @patch("pathlib.Path.read_text") + @patch("pathlib.Path.exists") + def test_read_file(self, mock_path, mock_read): + mock_path.return_value = True + mock_read.return_value = "test-string" + self.assertEqual( + mock_read.return_value, + self.led_controller._read_node(Path("test-fake"))) + + @patch("pathlib.Path.write_text") + @patch("pathlib.Path.read_text") + @patch("pathlib.Path.exists") + def test_write_failed(self, mock_path, mock_read, mock_write): + mock_path.return_value = True + mock_read.return_value = "33" + + with self.assertRaises(ValueError): + self.led_controller._write_node(Path("test-fake"), "22", True) + + @patch("pathlib.Path.write_text") + @patch("led_control_test.SysFsLEDController._read_node") + @patch("pathlib.Path.exists") + def test_write_passed(self, mock_path, mock_read, mock_write): + mock_path.return_value = True + mock_read.return_value = "22" + + self.led_controller._write_node( + self.led_controller.led_node, "22", True) + mock_read.assert_called_once_with(self.led_controller.led_node) + mock_write.assert_called_once_with("22") + + @patch("led_control_test.SysFsLEDController._write_node") + @patch("led_control_test.SysFsLEDController.off") + @patch("led_control_test.SysFsLEDController._get_initial_state") + @patch("led_control_test.SysFsLEDController._read_node") + def test_initial_sysfs_controller_property( + self, mock_read, mock_get_initial, mock_off, mock_write): + mock_read.return_value = 255 + + led_name = "status" + with SysFsLEDController(led_name, "3", "0") as led_controller: + mock_read.assert_called_once_with( + led_controller.max_brightness_node) + mock_get_initial.assert_called_once_with() + mock_off.assert_called_once_with() + mock_write.assert_called_once_with( + led_controller.trigger_node, "none", False) + 
self.assertEqual(led_controller.led_name, led_name) + self.assertIsInstance(led_controller.led_node, PosixPath) + self.assertIsInstance(led_controller.brightness_node, PosixPath) + self.assertIsInstance(led_controller.trigger_node, PosixPath) + self.assertEqual(led_controller.led_node.name, led_name) + self.assertEqual(led_controller.brightness_node.name, "brightness") + self.assertEqual(led_controller.trigger_node.name, "trigger") + self.assertDictEqual( + led_controller.initial_state, + {"trigger": None, "brightness": None}) + + @patch("led_control_test.SysFsLEDController._read_node") + def test_get_brightness(self, mock_read): + mock_read.return_value = "33" + self.assertEqual(self.led_controller.brightness, "33") + mock_read.assert_called_once_with( + self.led_controller.brightness_node) + + @patch("led_control_test.SysFsLEDController._write_node") + def test_set_brightness(self, mock_write): + self.led_controller.brightness = "33" + mock_write.assert_called_once_with( + self.led_controller.brightness_node, "33") + + @patch("led_control_test.SysFsLEDController._read_node") + def test_get_trigger(self, mock_read): + expected_data = "[none] usb-gadget rfkill-any kbd-scrolllock" + mock_read.return_value = expected_data + + self.assertEqual(self.led_controller.trigger, expected_data) + + @patch("led_control_test.SysFsLEDController._read_node") + def test_get_initial_state(self, mock_read): + mock_read.side_effect = [ + "none usb-gadget [usb-host] rfkill-any rfkill-none kbd-scrolllock", + "255" + ] + expected_data = {"trigger": "usb-host", "brightness": "255"} + self.led_controller._get_initial_state() + self.assertEqual(expected_data, self.led_controller.initial_state) + + @patch("led_control_test.SysFsLEDController.off") + @patch("led_control_test.SysFsLEDController.on") + def test_blinking_test(self, mock_on, mock_off): + + self.led_controller.blinking(1, 0.5) + mock_on.assert_called_with() + mock_off.assert_called_with() + + +class 
TestArgumentParser(unittest.TestCase): + + def test_parser(self): + sys.argv = [ + "led_control_test.py", "--debug", "-n", "fake-led", + "--on-value", "33", "--off-value", "1", "-d", "30", "-i", "2" + + ] + args = register_arguments() + + self.assertEqual(args.debug, True) + self.assertEqual(args.name, "fake-led") + self.assertEqual(args.duration, 30) + self.assertEqual(args.interval, 2) + self.assertEqual(args.on_value, 33) + self.assertEqual(args.off_value, 1) diff --git a/providers/base/units/led/jobs.pxu b/providers/base/units/led/jobs.pxu index 287b9c9238..1ede08616b 100644 --- a/providers/base/units/led/jobs.pxu +++ b/providers/base/units/led/jobs.pxu @@ -347,4 +347,44 @@ estimated_duration: 10 imports: from com.canonical.plainbox import manifest requires: manifest.has_led_indicator == 'True' command: - led_test.sh -t sysfs -n {path} + led_control_test.py -n {path} -d 10 + +id: led-indicator/gpio-controller-leds +plugin: resource +_summary: Gather a list of LED indicators for the device that are controlled via GPIO chip. +_description: + A LED GPIO devices mapping resource that relies on the user specifying it in a config variable. + Refer to the /sys/kernel/debug/gpio for the gpiochip number + Usage of parameter: GPIO_CONTROLLER_LEDS={name1}:{controller1}:{port1} {name2}:{controller1}:{port2} ... + e.g. GPIO_CONTROLLER_LEDS=dl14:3:1 dl15:3:2 dl16:3:3 +estimated_duration: 3 +environ: GPIO_CONTROLLER_LEDS +command: + gpio_control_test.py led-resource "$GPIO_CONTROLLER_LEDS" + +unit: template +template-resource: led-indicator/gpio-controller-leds +template-unit: job +category_id: led +id: led-indicator/gpio-controller-leds-{name} +estimated_duration: 10 +plugin: user-interact-verify +user: root +imports: from com.canonical.plainbox import manifest +requires: manifest.has_led_indicator == 'True' +flags: also-after-suspend +_summary: Check control of {name} LED indicator. +_description: + Check that {name} LED turns on and off. +_steps: + 1. 
Press Enter and observe LED behavior on DUT. +_verification: + Does the "{name}" LED blink? +command: + if (snap connections | grep "$SNAP_NAME:gpio"); then + # the gpio slots have been connected to checkbox, skip the GPIO export steps + gpio_control_test.py led -n {name} --gpio-chip {chip_number} --gpio-pin {port} + else + # the gpio slots have not been connected to checkbox, perform the GPIO export steps during testing + gpio_control_test.py led -n {name} --gpio-chip {chip_number} --gpio-pin {port} --need-export + fi diff --git a/providers/base/units/led/test-plan.pxu b/providers/base/units/led/test-plan.pxu index 232dc13ae6..9eba1cb055 100644 --- a/providers/base/units/led/test-plan.pxu +++ b/providers/base/units/led/test-plan.pxu @@ -119,21 +119,29 @@ bootstrap_include: id: led-indicator-manual unit: test plan _name: Manual LED indicator tests for IoT -_description: Manual LED indicator tests for IoT devices +_description: + Manual LED indicator tests for IoT devices + Notes: the led-indicator/gpio-leds will be deprecated in the next release include: led-indicator/gpio-leds-.* + led-indicator/gpio-controller-leds-.* led-indicator/sysfs-leds-.* bootstrap_include: led-indicator/gpio-leds + led-indicator/gpio-controller-leds led-indicator/sysfs-leds id: after-suspend-led-indicator-manual unit: test plan _name: Manual LED indicator tests for IoT (after_suspend) -_description: Manual LED indicator tests for IoT devices (after_suspend) +_description: + Manual LED indicator tests for IoT devices (after_suspend) + Notes: the led-indicator/gpio-leds will be deprecated in the next release include: after-suspend-led-indicator/gpio-leds-.* + after-suspend-led-indicator/gpio-controller-leds-.* after-suspend-led-indicator/sysfs-leds-.* bootstrap_include: led-indicator/gpio-leds + led-indicator/gpio-controller-leds led-indicator/sysfs-leds From de233440009723dce060951135c1e120f569a60e Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Wed, 21 Feb 2024 10:40:41 +0100 Subject: [PATCH 
043/108] Support case insensitive searching in names or ids (bugfix) (#1005) Support searching by id and case insensitive --- checkbox-ng/checkbox_ng/urwid_ui.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/checkbox-ng/checkbox_ng/urwid_ui.py b/checkbox-ng/checkbox_ng/urwid_ui.py index ecafcea72e..e3dfbc714c 100644 --- a/checkbox-ng/checkbox_ng/urwid_ui.py +++ b/checkbox-ng/checkbox_ng/urwid_ui.py @@ -766,11 +766,15 @@ def unhandled_input(self, key): if filter_str == "": self._update_button_pile(self.controller_list) else: + filter_str = filter_str.lower() self._update_button_pile( [ x for x in self.controller_list - if filter_str in x.get("name") + if ( + filter_str in x.get("name").lower() + or filter_str in x.get("id").lower() + ) ] ) if key in ("esc", "enter"): From d3b343b3e6d2eb83515f57813a0e611a8b29c9d1 Mon Sep 17 00:00:00 2001 From: kissiel Date: Wed, 21 Feb 2024 15:52:55 +0100 Subject: [PATCH 044/108] Be verbose when breaking pipes when interacting with bluetoothctl (BugFix) (#1006) * be verbose when breaking pipes with BT * add unit tests * fix python3.5 issues * properly mock stdin * more unit tests --- .../checkbox_support/interactive_cmd.py | 3 + .../tests/test_interactive_cmd.py | 55 +++++++++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 checkbox-support/checkbox_support/tests/test_interactive_cmd.py diff --git a/checkbox-support/checkbox_support/interactive_cmd.py b/checkbox-support/checkbox_support/interactive_cmd.py index b7b8f7b22f..7734326871 100644 --- a/checkbox-support/checkbox_support/interactive_cmd.py +++ b/checkbox-support/checkbox_support/interactive_cmd.py @@ -84,6 +84,9 @@ def writeline(self, line, sleep=0.1): self._proc.stdin.flush() except BrokenPipeError: self._logger.warning("Broken pipe when sending to the process!") + if self._pending: + self._logger.warning( + "The output before the pipe broke: %s", self.read_all()) self._close_fds([self._proc.stdin]) raise time.sleep(sleep) diff --git 
a/checkbox-support/checkbox_support/tests/test_interactive_cmd.py b/checkbox-support/checkbox_support/tests/test_interactive_cmd.py new file mode 100644 index 0000000000..eec22c8775 --- /dev/null +++ b/checkbox-support/checkbox_support/tests/test_interactive_cmd.py @@ -0,0 +1,55 @@ +# This file is part of Checkbox. +# +# Copyright 2024 Canonical Ltd. +# Checkbox is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, +# as published by the Free Software Foundation. +# +# Checkbox is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Checkbox. If not, see . + +import unittest + +from unittest.mock import MagicMock, patch + +from checkbox_support.interactive_cmd import InteractiveCommand + +class InteractiveCommandTests(unittest.TestCase): + + @patch('sys.stdin') + def test_write_line_nominal(self, mock_stdin): + mock_self = MagicMock() + mock_stdin.encoding = 'utf-8' + InteractiveCommand.writeline(mock_self, 'Hello, world!') + mock_self._proc.stdin.write.assert_called_with(b'Hello, world!\n') + + @patch('sys.stdin') + def test_write_line_broken_pipe_with_pending(self, mock_stdin): + mock_self = MagicMock() + mock_self._proc.stdin.write.side_effect = BrokenPipeError + mock_stdin.encoding = 'utf-8' + mock_self.read_all.return_value = 'my pipe is gonna break' + with self.assertRaises(BrokenPipeError): + InteractiveCommand.writeline(mock_self, 'Hello, world!') + mock_self._logger.warning.assert_called_with( + "The output before the pipe broke: %s", + "my pipe is gonna break") + mock_self._proc.stdin.write.assert_called_with(b'Hello, world!\n') + + @patch('sys.stdin') + def test_write_line_broken_pipe_without_pending(self, mock_stdin): + mock_self = 
MagicMock() + mock_self._proc.stdin.write.side_effect = BrokenPipeError + mock_self._pending = 0 + mock_stdin.encoding = 'utf-8' + mock_self.read_all.return_value = '' + with self.assertRaises(BrokenPipeError): + InteractiveCommand.writeline(mock_self, 'Hello, world!') + mock_self._logger.warning.assert_called_with( + "Broken pipe when sending to the process!") + mock_self._proc.stdin.write.assert_called_with(b'Hello, world!\n') \ No newline at end of file From af097ed647ffb233241c4db14025b238ee462fc5 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Wed, 21 Feb 2024 17:58:02 +0100 Subject: [PATCH 045/108] Resume Screen for Checkbox Remote expose API (Breaking) (#1007) * Bump the version number * Expose needed APIs and use those instead of the private _sa api * Update tests with new api locations for RA * Update tests with new api locations for controller --- .../checkbox_ng/launcher/controller.py | 10 ++-- .../checkbox_ng/launcher/test_controller.py | 58 +++++++++---------- .../plainbox/impl/session/remote_assistant.py | 16 ++++- .../impl/session/test_remote_assistant.py | 23 ++++---- 4 files changed, 58 insertions(+), 49 deletions(-) diff --git a/checkbox-ng/checkbox_ng/launcher/controller.py b/checkbox-ng/checkbox_ng/launcher/controller.py index f5b5ea9c77..6690dbc796 100644 --- a/checkbox-ng/checkbox_ng/launcher/controller.py +++ b/checkbox-ng/checkbox_ng/launcher/controller.py @@ -445,15 +445,15 @@ def _resume_session_menu(self, resumable_sessions): return False def _resume_session(self, resume_params): - metadata = self.sa._sa.resume_session(resume_params.session_id) + metadata = self.sa.resume_session(resume_params.session_id) if "testplanless" not in metadata.flags: app_blob = json.loads(metadata.app_blob.decode("UTF-8")) test_plan_id = app_blob["testplan_id"] - self.sa._sa.select_test_plan(test_plan_id) - self.sa._sa.bootstrap() + self.sa.select_test_plan(test_plan_id) + self.sa.bootstrap() last_job = metadata.running_job_name is_cert_blocker = ( - 
self.sa._sa.get_job_state(last_job).effective_certification_status + self.sa.get_job_state(last_job).effective_certification_status == "blocker" ) # If we resumed maybe not rerun the same, probably broken job @@ -811,7 +811,7 @@ def _maybe_manual_rerun_jobs(self): def _run_jobs(self, jobs_repr, total_num=0): for job in jobs_repr: - job_state = self.sa._sa.get_job_state(job["id"]) + job_state = self.sa.get_job_state(job["id"]) self.sa.note_metadata_starting_job(job, job_state) SimpleUI.header( _("Running job {} / {}").format( diff --git a/checkbox-ng/checkbox_ng/launcher/test_controller.py b/checkbox-ng/checkbox_ng/launcher/test_controller.py index 45c49f1515..67fddf1cf7 100644 --- a/checkbox-ng/checkbox_ng/launcher/test_controller.py +++ b/checkbox-ng/checkbox_ng/launcher/test_controller.py @@ -151,7 +151,7 @@ def test_finish_session_all_pass(self): "job1": mock.MagicMock(result=mock.MagicMock(outcome="pass")), "job2": mock.MagicMock(result=mock.MagicMock(outcome="pass")), } - self_mock._sa.manager.default_device_context._state._job_state_map = ( + self_mock.manager.default_device_context._state._job_state_map = ( mock_job_state_map ) RemoteController.finish_session(self_mock) @@ -170,12 +170,12 @@ def test_finish_session_with_failure(self): "job2": mock.MagicMock(result=mock.MagicMock(outcome="fail")), "job3": mock.MagicMock(result=mock.MagicMock(outcome="pass")), } - self_mock._sa.manager.default_device_context._state._job_state_map = ( + self_mock.manager.default_device_context._state._job_state_map = ( mock_job_state_map ) RemoteController.finish_session(self_mock) - self.assertTrue(self_mock._has_anything_failed) + self.assertTrue(self_mock._sa._has_anything_failed) def test_finish_session_with_crash(self): """ @@ -189,12 +189,12 @@ def test_finish_session_with_crash(self): "job2": mock.MagicMock(result=mock.MagicMock(outcome="crash")), "job3": mock.MagicMock(result=mock.MagicMock(outcome="pass")), } - 
self_mock._sa.manager.default_device_context._state._job_state_map = ( + self_mock.manager.default_device_context._state._job_state_map = ( mock_job_state_map ) RemoteController.finish_session(self_mock) - self.assertTrue(self_mock._has_anything_failed) + self.assertTrue(self_mock._sa._has_anything_failed) @mock.patch("checkbox_ng.launcher.controller.SimpleUI") @mock.patch("checkbox_ng.launcher.controller.resume_dialog") @@ -538,8 +538,8 @@ def test_resume_session_pass( flags=[], running_job_name="job1", ) - sa_mock._sa.resume_session.return_value = metadata_mock - sa_mock._sa.get_job_state.return_value = mock.MagicMock( + sa_mock.resume_session.return_value = metadata_mock + sa_mock.get_job_state.return_value = mock.MagicMock( effective_certification_status="non-blocker" ) @@ -548,9 +548,9 @@ def test_resume_session_pass( RemoteController._resume_session(self_mock, resume_params) # Assertions - sa_mock._sa.resume_session.assert_called_once_with("123") - sa_mock._sa.select_test_plan.assert_called_once_with("abc") - self.assertTrue(sa_mock._sa.bootstrap.called) + sa_mock.resume_session.assert_called_once_with("123") + sa_mock.select_test_plan.assert_called_once_with("abc") + self.assertTrue(sa_mock.bootstrap.called) sa_mock.resume_by_id.assert_called_once_with( "123", { @@ -583,8 +583,8 @@ def test_resume_session_fail_not_cert_blocker( flags=[], running_job_name="job1", ) - sa_mock._sa.resume_session.return_value = metadata_mock - sa_mock._sa.get_job_state.return_value = mock.MagicMock( + sa_mock.resume_session.return_value = metadata_mock + sa_mock.get_job_state.return_value = mock.MagicMock( effective_certification_status="non-blocker" ) @@ -593,9 +593,9 @@ def test_resume_session_fail_not_cert_blocker( RemoteController._resume_session(self_mock, resume_params) # Assertions - sa_mock._sa.resume_session.assert_called_once_with("123") - sa_mock._sa.select_test_plan.assert_called_once_with("abc") - self.assertTrue(sa_mock._sa.bootstrap.called) + 
sa_mock.resume_session.assert_called_once_with("123") + sa_mock.select_test_plan.assert_called_once_with("abc") + self.assertTrue(sa_mock.bootstrap.called) sa_mock.resume_by_id.assert_called_once_with( "123", { @@ -628,8 +628,8 @@ def test_resume_session_fail_cert_blocker( flags=["testplanless"], running_job_name="job1", ) - sa_mock._sa.resume_session.return_value = metadata_mock - sa_mock._sa.get_job_state.return_value = mock.MagicMock( + sa_mock.resume_session.return_value = metadata_mock + sa_mock.get_job_state.return_value = mock.MagicMock( effective_certification_status="blocker" ) @@ -638,7 +638,7 @@ def test_resume_session_fail_cert_blocker( RemoteController._resume_session(self_mock, resume_params) # Assertions - sa_mock._sa.resume_session.assert_called_once_with("123") + sa_mock.resume_session.assert_called_once_with("123") sa_mock.resume_by_id.assert_called_once_with( "123", { @@ -671,8 +671,8 @@ def test_resume_session_skip_not_cert_blocker( flags=[], running_job_name="job1", ) - sa_mock._sa.resume_session.return_value = metadata_mock - sa_mock._sa.get_job_state.return_value = mock.MagicMock( + sa_mock.resume_session.return_value = metadata_mock + sa_mock.get_job_state.return_value = mock.MagicMock( effective_certification_status="non-blocker" ) @@ -681,9 +681,9 @@ def test_resume_session_skip_not_cert_blocker( RemoteController._resume_session(self_mock, resume_params) # Assertions - sa_mock._sa.resume_session.assert_called_once_with("123") - sa_mock._sa.select_test_plan.assert_called_once_with("abc") - self.assertTrue(sa_mock._sa.bootstrap.called) + sa_mock.resume_session.assert_called_once_with("123") + sa_mock.select_test_plan.assert_called_once_with("abc") + self.assertTrue(sa_mock.bootstrap.called) sa_mock.resume_by_id.assert_called_once_with( "123", { @@ -716,8 +716,8 @@ def test_resume_session_skip_cert_blocker( flags=["testplanless"], running_job_name="job1", ) - sa_mock._sa.resume_session.return_value = metadata_mock - 
sa_mock._sa.get_job_state.return_value = mock.MagicMock( + sa_mock.resume_session.return_value = metadata_mock + sa_mock.get_job_state.return_value = mock.MagicMock( effective_certification_status="blocker" ) @@ -726,7 +726,7 @@ def test_resume_session_skip_cert_blocker( RemoteController._resume_session(self_mock, resume_params) # Assertions - sa_mock._sa.resume_session.assert_called_once_with("123") + sa_mock.resume_session.assert_called_once_with("123") sa_mock.resume_by_id.assert_called_once_with( "123", { @@ -759,8 +759,8 @@ def test_resume_session_rerun( flags=["testplanless"], running_job_name="job1", ) - sa_mock._sa.resume_session.return_value = metadata_mock - sa_mock._sa.get_job_state.return_value = mock.MagicMock( + sa_mock.resume_session.return_value = metadata_mock + sa_mock.get_job_state.return_value = mock.MagicMock( effective_certification_status="blocker" ) @@ -769,7 +769,7 @@ def test_resume_session_rerun( RemoteController._resume_session(self_mock, resume_params) # Assertions - sa_mock._sa.resume_session.assert_called_once_with("123") + sa_mock.resume_session.assert_called_once_with("123") sa_mock.resume_by_id.assert_called_once_with( "123", { diff --git a/checkbox-ng/plainbox/impl/session/remote_assistant.py b/checkbox-ng/plainbox/impl/session/remote_assistant.py index c07ff8cab2..0ccbf9f604 100644 --- a/checkbox-ng/plainbox/impl/session/remote_assistant.py +++ b/checkbox-ng/plainbox/impl/session/remote_assistant.py @@ -143,7 +143,7 @@ def outcome(self): class RemoteSessionAssistant: """Remote execution enabling wrapper for the SessionAssistant""" - REMOTE_API_VERSION = 12 + REMOTE_API_VERSION = 13 def __init__(self, cmd_callback): _logger.debug("__init__()") @@ -349,6 +349,9 @@ def start_session(self, configuration): ) # sorted by name return self._available_testplans + def select_test_plan(self, test_plan_id): + return self._sa.select_test_plan(test_plan_id) + @allowed_when(Started) def prepare_bootstrapping(self, test_plan_id): """Save picked 
test plan to the app blob.""" @@ -635,6 +638,9 @@ def prepare_rerun_candidates(self, rerun_candidates): def get_job_result(self, job_id): return self._sa.get_job_state(job_id).result + def get_job_state(self, job_id): + return self._sa.get_job_state(job_id) + def get_jobs_repr(self, job_ids, offset=0): """ Translate jobs into a {'field': 'val'} representations. @@ -688,6 +694,12 @@ def delete_sessions(self, session_list): def get_resumable_sessions(self): return self._sa.get_resumable_sessions() + def resume_session(self, session_id, runner_kwargs={}): + return self._sa.resume_session(session_id, runner_kwargs=runner_kwargs) + + def bootstrap(self): + return self._sa.bootstrap() + def resume_by_id(self, session_id=None, overwrite_result_dict={}): _logger.info("resume_by_id: %r", session_id) self._launcher = load_configs() @@ -706,7 +718,7 @@ def resume_by_id(self, session_id=None, overwrite_result_dict={}): "stdin": self._pipe_to_subproc, "extra_env": self.prepare_extra_env, } - meta = self._sa.resume_session(session_id, runner_kwargs=runner_kwargs) + meta = self.resume_session(session_id, runner_kwargs=runner_kwargs) app_blob = json.loads(meta.app_blob.decode("UTF-8")) launcher_from_controller = Configuration.from_text( app_blob["launcher"], "Remote launcher" diff --git a/checkbox-ng/plainbox/impl/session/test_remote_assistant.py b/checkbox-ng/plainbox/impl/session/test_remote_assistant.py index e1a0ccdf6c..61ca30dd62 100644 --- a/checkbox-ng/plainbox/impl/session/test_remote_assistant.py +++ b/checkbox-ng/plainbox/impl/session/test_remote_assistant.py @@ -75,7 +75,7 @@ def test_start_session_with_launcher(self, mock_filter, mock_gnu): extra_cfg = dict() extra_cfg["launcher"] = "test_launcher" rsa = mock.Mock() - rsa._sa.get_test_plans.return_value = [mock.Mock()] + rsa.get_test_plans.return_value = [mock.Mock()] rsa._state = remote_assistant.Idle with mock.patch("plainbox.impl.config.Configuration.from_text") as cm: cm.return_value = Configuration() @@ -93,7 
+93,7 @@ def test_start_session_without_launcher(self, mock_filter, mock_gnu): extra_cfg = dict() extra_cfg["launcher"] = "test_launcher" rsa = mock.Mock() - rsa._sa.get_test_plans.return_value = [mock.Mock()] + rsa.get_test_plans.return_value = [mock.Mock()] rsa._state = remote_assistant.Idle with mock.patch("plainbox.impl.config.Configuration.from_text") as cm: cm.return_value = Configuration() @@ -111,7 +111,7 @@ def test_resume_by_id_with_session_id(self): mock_meta = mock.Mock() mock_meta.app_blob = b'{"launcher": "", "testplan_id": "tp_id"}' - rsa._sa.resume_session.return_value = mock_meta + rsa.resume_session.return_value = mock_meta remote_assistant.RemoteSessionAssistant.resume_by_id(rsa, "session_id") self.assertEqual(rsa._state, "testsselected") @@ -124,7 +124,7 @@ def test_resume_by_id_bad_session_id(self): mock_meta = mock.Mock() mock_meta.app_blob = b'{"launcher": "", "testplan_id": "tp_id"}' - rsa._sa.resume_session.return_value = mock_meta + rsa.resume_session.return_value = mock_meta remote_assistant.RemoteSessionAssistant.resume_by_id(rsa, "bad_id") self.assertEqual(rsa._state, "idle") @@ -137,7 +137,7 @@ def test_resume_by_id_without_session_id(self): mock_meta = mock.Mock() mock_meta.app_blob = b'{"launcher": "", "testplan_id": "tp_id"}' - rsa._sa.resume_session.return_value = mock_meta + rsa.resume_session.return_value = mock_meta remote_assistant.RemoteSessionAssistant.resume_by_id(rsa) self.assertEqual(rsa._state, "testsselected") @@ -153,7 +153,7 @@ def test_resume_by_id_with_result_file_ok(self, mock_load_configs): mock_meta = mock.Mock() mock_meta.app_blob = b'{"launcher": "", "testplan_id": "tp_id"}' - rsa._sa.resume_session.return_value = mock_meta + rsa.resume_session.return_value = mock_meta os_path_exists_mock = mock.Mock() with mock.patch("plainbox.impl.session.remote_assistant._") as mock__: @@ -193,7 +193,7 @@ def test_resume_by_id_with_result_file_garbage_outcome( mock_meta = mock.Mock() mock_meta.app_blob = b'{"launcher": "", 
"testplan_id": "tp_id"}' - rsa._sa.resume_session.return_value = mock_meta + rsa.resume_session.return_value = mock_meta os_path_exists_mock = mock.Mock() with mock.patch("plainbox.impl.session.remote_assistant._") as mock__: @@ -233,10 +233,9 @@ def test_resume_by_id_with_result_no_file_noreturn( mock_meta = mock.Mock() mock_meta.app_blob = b'{"launcher": "", "testplan_id": "tp_id"}' - rsa._sa.resume_session.return_value = mock_meta + rsa.resume_session.return_value = mock_meta os_path_exists_mock = mock.Mock() - rsa._sa.get_job = mock.Mock() rsa._sa.get_job.return_value.plugin = "shell" with mock.patch("os.path.exists", os_path_exists_mock): @@ -268,10 +267,9 @@ def test_resume_by_id_with_result_no_file_normal(self, mock_load_configs): mock_meta = mock.Mock() mock_meta.app_blob = b'{"launcher": "", "testplan_id": "tp_id"}' - rsa._sa.resume_session.return_value = mock_meta + rsa.resume_session.return_value = mock_meta os_path_exists_mock = mock.Mock() - rsa._sa.get_job = mock.Mock() rsa._sa.get_job.return_value.plugin = "shell" with mock.patch("os.path.exists", os_path_exists_mock): @@ -301,7 +299,7 @@ def test_resume_by_id_with_result_file_not_json(self, mock_load_configs): mock_meta = mock.Mock() mock_meta.app_blob = b'{"launcher": "", "testplan_id": "tp_id"}' - rsa._sa.resume_session.return_value = mock_meta + rsa.resume_session.return_value = mock_meta os_path_exists_mock = mock.Mock() with mock.patch("plainbox.impl.session.remote_assistant._") as mock__: mock__.side_effect = lambda x: x @@ -377,7 +375,6 @@ def test_get_resumable_sessions(self): class RemoteAssistantFinishJobTests(TestCase): def setUp(self): self.rsa = mock.MagicMock() - self.rsa._sa = mock.Mock() self.rsa._be = None @mock.patch("plainbox.impl.session.remote_assistant.JobResultBuilder") From a88e5d01285e3946d7cc684bea191d0ead55e329 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Thu, 22 Feb 2024 16:08:29 +0100 Subject: [PATCH 046/108] Add tests for resume menu (infra) (#1008) * Add tests for 
resume menu * Remove random newline everywhere in license * Removed reduntant test tag --- .../cert_blocker_comment/launcher.py | 3 +- .../metabox/scenarios/config/environment.py | 1 - .../scenarios/config/test_selection.py | 2 - metabox/metabox/scenarios/ui/interact_jobs.py | 1 - metabox/metabox/scenarios/ui/resume_menu.py | 300 ++++++++++++++++++ metabox/metabox/scenarios/ui/testplan.py | 1 - 6 files changed, 301 insertions(+), 7 deletions(-) create mode 100644 metabox/metabox/scenarios/ui/resume_menu.py diff --git a/metabox/metabox/scenarios/cert_blocker_comment/launcher.py b/metabox/metabox/scenarios/cert_blocker_comment/launcher.py index 885ff2f703..6095892fbb 100644 --- a/metabox/metabox/scenarios/cert_blocker_comment/launcher.py +++ b/metabox/metabox/scenarios/cert_blocker_comment/launcher.py @@ -7,7 +7,6 @@ # Checkbox is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, # as published by the Free Software Foundation. - # # Checkbox is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -277,4 +276,4 @@ class ManualJobSkippedWhenResumingSession(Scenario): Expect("Select jobs to re-run"), Send("f" + keys.KEY_ENTER), Expect(_re("(☐|job skipped).*A simple manual job")), - ] \ No newline at end of file + ] diff --git a/metabox/metabox/scenarios/config/environment.py b/metabox/metabox/scenarios/config/environment.py index 206c73f45f..190dfab43b 100644 --- a/metabox/metabox/scenarios/config/environment.py +++ b/metabox/metabox/scenarios/config/environment.py @@ -7,7 +7,6 @@ # Checkbox is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, # as published by the Free Software Foundation. 
- # # Checkbox is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/metabox/metabox/scenarios/config/test_selection.py b/metabox/metabox/scenarios/config/test_selection.py index b6e830aef9..6e5db66755 100644 --- a/metabox/metabox/scenarios/config/test_selection.py +++ b/metabox/metabox/scenarios/config/test_selection.py @@ -7,7 +7,6 @@ # Checkbox is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, # as published by the Free Software Foundation. - # # Checkbox is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -299,4 +298,3 @@ class TestPlanSelectionFilter(Scenario): Send("i"), AssertNotPrinted("smoke") ] - diff --git a/metabox/metabox/scenarios/ui/interact_jobs.py b/metabox/metabox/scenarios/ui/interact_jobs.py index f8dd646ec9..9a36b248bc 100644 --- a/metabox/metabox/scenarios/ui/interact_jobs.py +++ b/metabox/metabox/scenarios/ui/interact_jobs.py @@ -7,7 +7,6 @@ # Checkbox is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, # as published by the Free Software Foundation. - # # Checkbox is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/metabox/metabox/scenarios/ui/resume_menu.py b/metabox/metabox/scenarios/ui/resume_menu.py new file mode 100644 index 0000000000..4030d2e6e5 --- /dev/null +++ b/metabox/metabox/scenarios/ui/resume_menu.py @@ -0,0 +1,300 @@ +# This file is part of Checkbox. +# +# Copyright 2024 Canonical Ltd. +# Written by: +# Massimiliano Girardi +# +# Checkbox is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, +# as published by the Free Software Foundation. 
+# +# Checkbox is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Checkbox. If not, see . +import textwrap + +import metabox.core.keys as keys +from metabox.core.actions import Expect, Send, Start, SelectTestPlan +from metabox.core.scenario import Scenario +from metabox.core.utils import tag + + +@tag("manual", "resume") +class ResumeMenuMultipleDelete(Scenario): + modes = ["remote"] + launcher = textwrap.dedent( + """ + [launcher] + launcher_version = 1 + stock_reports = text + [test selection] + forced = yes + """ + ) + + steps = [ + # Generate 2 resume candidates + Start(), + Expect("Select test plan"), + SelectTestPlan( + "2021.com.canonical.certification::cert-blocker-manual-resume" + ), + Send(keys.KEY_ENTER), + Expect("Pick an action"), + Send("p" + keys.KEY_ENTER), + Expect("Pick an action"), + Send("q" + keys.KEY_ENTER), + Expect("Session saved"), + Start(), + Expect("Select test plan"), + SelectTestPlan( + "2021.com.canonical.certification::cert-blocker-manual-resume" + ), + Send(keys.KEY_ENTER), + Expect("Pick an action"), + Send("p" + keys.KEY_ENTER), + Expect("Pick an action"), + Send("q" + keys.KEY_ENTER), + Expect("Session saved"), + Start(), + Expect("Resume session"), + # Enter the resume menu + Send("r"), + Expect("Incomplete sessions"), + # Delete first + Send("d"), + # More session available, remain in the resume menun + Expect("Incomplete sessions"), + # Delete second + Send("d"), + # No more sessions available, go back to test plan selection + Expect("Select test plan"), + # Now we still have to be able to run test plans + SelectTestPlan("2021.com.canonical.certification::whoami_as_user_tp "), + Send(keys.KEY_ENTER), + Expect("Results"), + ] + + +@tag("manual", "resume") +class 
ResumeMenuMarkSkip(Scenario): + modes = ["remote"] + launcher = textwrap.dedent( + """ + [launcher] + launcher_version = 1 + stock_reports = text + [test selection] + forced = yes + """ + ) + + steps = [ + # Generate 1 resume candidates + Start(), + Expect("Select test plan"), + SelectTestPlan( + "2021.com.canonical.certification::cert-blocker-manual-resume" + ), + Send(keys.KEY_ENTER), + Expect("Pick an action"), + Send("p" + keys.KEY_ENTER), + Expect("Pick an action"), + Send("q" + keys.KEY_ENTER), + Expect("Session saved"), + Start(), + Expect("Resume session"), + # Enter the resume menu + Send("r"), + Expect("Incomplete sessions"), + Send(keys.KEY_ENTER), + Expect("last job?"), + # select Skip + Send(keys.KEY_DOWN * 1), + Send(keys.KEY_ENTER), + # Job is a cert blocker, it must ask for a comment + Expect("Please enter your comments"), + Send("Comment" + keys.KEY_ENTER), + Expect("Skipped Jobs"), + Expect("Finish"), + Send("f"), + Expect("Result"), + ] + + +@tag("manual", "resume") +class ResumeMenuMarkFail(Scenario): + modes = ["remote"] + launcher = textwrap.dedent( + """ + [launcher] + launcher_version = 1 + stock_reports = text + [test selection] + forced = yes + """ + ) + + steps = [ + # Generate 1 resume candidates + Start(), + Expect("Select test plan"), + SelectTestPlan( + "2021.com.canonical.certification::cert-blocker-manual-resume" + ), + Send(keys.KEY_ENTER), + Expect("Pick an action"), + Send("p" + keys.KEY_ENTER), + Expect("Pick an action"), + Send("q" + keys.KEY_ENTER), + Expect("Session saved"), + Start(), + Expect("Resume session"), + # Enter the resume menu + Send("r"), + Expect("Incomplete sessions"), + Send(keys.KEY_ENTER), + Expect("last job?"), + # select Skip + Send(keys.KEY_DOWN), + Send(keys.KEY_DOWN), + Send(keys.KEY_DOWN), + Send(keys.KEY_ENTER), + # Job is a cert blocker, it must ask for a comment + Expect("Please enter your comments"), + Send("Comment" + keys.KEY_ENTER), + # Now we still have to be able to run test plans + 
Expect("Failed Jobs"), + Expect("Finish"), + Send("f"), + Expect("Result"), + ] + +@tag("manual", "resume") +class ResumeMenuMarkPreCommentFail(Scenario): + modes = ["remote"] + launcher = textwrap.dedent( + """ + [launcher] + launcher_version = 1 + stock_reports = text + [test selection] + forced = yes + """ + ) + + steps = [ + # Generate 1 resume candidates + Start(), + Expect("Select test plan"), + SelectTestPlan( + "2021.com.canonical.certification::cert-blocker-manual-resume" + ), + Send(keys.KEY_ENTER), + Expect("Pick an action"), + Send("p" + keys.KEY_ENTER), + Expect("Pick an action"), + Send("q" + keys.KEY_ENTER), + Expect("Session saved"), + Start(), + Expect("Resume session"), + # Enter the resume menu + Send("r"), + Expect("Incomplete sessions"), + Send(keys.KEY_ENTER), + Expect("last job?"), + # select Comment + Send(keys.KEY_ENTER), + Expect("Enter comment"), + Send("Job failed due to reason" + keys.KEY_ENTER), + # select Skip + Send("f"), + # Job is a cert blocker, but it should not ask for a comment as it was + # provided from the resume menu + Expect("Failed Jobs"), + Expect("Finish"), + Send("f"), + Expect("Result"), + ] + +@tag("manual", "resume") +class ResumeMenuMarkPassed(Scenario): + modes = ["remote"] + launcher = textwrap.dedent( + """ + [launcher] + launcher_version = 1 + stock_reports = text + [test selection] + forced = yes + """ + ) + + steps = [ + # Generate 1 resume candidates + Start(), + Expect("Select test plan"), + SelectTestPlan( + "2021.com.canonical.certification::cert-blocker-manual-resume" + ), + Send(keys.KEY_ENTER), + Expect("Pick an action"), + Send("p" + keys.KEY_ENTER), + Expect("Pick an action"), + Send("q" + keys.KEY_ENTER), + Expect("Session saved"), + Start(), + Expect("Resume session"), + # Enter the resume menu + Send("r"), + Expect("Incomplete sessions"), + Send(keys.KEY_ENTER), + Expect("last job?"), + # Select Mark as Pass + Send("p"), + Expect("Result"), + ] + +@tag("manual", "resume") +class 
ResumeMenuResumeLastJob(Scenario): + modes = ["remote"] + launcher = textwrap.dedent( + """ + [launcher] + launcher_version = 1 + stock_reports = text + [test selection] + forced = yes + """ + ) + + steps = [ + # Generate 1 resume candidates + Start(), + Expect("Select test plan"), + SelectTestPlan( + "2021.com.canonical.certification::cert-blocker-manual-resume" + ), + Send(keys.KEY_ENTER), + Expect("Pick an action"), + Send("p" + keys.KEY_ENTER), + Expect("Pick an action"), + Send("q" + keys.KEY_ENTER), + Expect("Session saved"), + Start(), + Expect("Resume session"), + # Enter the resume menu + Send("r"), + Expect("Incomplete sessions"), + Send(keys.KEY_ENTER), + Expect("last job?"), + # select Resume and run the job again + Send("R"), + Expect("press ENTER to continue"), + Send(keys.KEY_ENTER), + Expect("Result"), + ] diff --git a/metabox/metabox/scenarios/ui/testplan.py b/metabox/metabox/scenarios/ui/testplan.py index d2687133c1..a323157132 100644 --- a/metabox/metabox/scenarios/ui/testplan.py +++ b/metabox/metabox/scenarios/ui/testplan.py @@ -8,7 +8,6 @@ # Checkbox is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, # as published by the Free Software Foundation. 
- # # Checkbox is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of From cae8ac8d536c5c691e0a117c09e119bff056e6d0 Mon Sep 17 00:00:00 2001 From: Nancy Chen Date: Fri, 23 Feb 2024 20:24:20 +0800 Subject: [PATCH 047/108] Fixed failed ping test (BugFix) (#1012) run jobs using gateway_ping_test.py as root --- providers/base/units/networking/jobs.pxu | 2 ++ providers/base/units/suspend/suspend.pxu | 3 +++ 2 files changed, 5 insertions(+) diff --git a/providers/base/units/networking/jobs.pxu b/providers/base/units/networking/jobs.pxu index 48c8ac0b0a..30ea187b98 100644 --- a/providers/base/units/networking/jobs.pxu +++ b/providers/base/units/networking/jobs.pxu @@ -6,6 +6,7 @@ depends: ethernet/detect command: gateway_ping_test.py estimated_duration: 2.000 _description: Tests whether the system has a working Internet connection. +user: root unit: template template-resource: device @@ -52,6 +53,7 @@ id: networking/ping command: gateway_ping_test.py "$CHECKBOX_SERVER" _description: Automated test case to verify availability of some system on the network using ICMP ECHO packets. +user: root plugin: shell category_id: com.canonical.plainbox::networking diff --git a/providers/base/units/suspend/suspend.pxu b/providers/base/units/suspend/suspend.pxu index 9ca63be8a4..4ae1bfa509 100644 --- a/providers/base/units/suspend/suspend.pxu +++ b/providers/base/units/suspend/suspend.pxu @@ -5,6 +5,7 @@ depends: ethernet/detect estimated_duration: 1.2 _summary: Record the current network before suspending. command: set -o pipefail; gateway_ping_test.py | tee "$PLAINBOX_SESSION_SHARE"/network_before_suspend.txt +user: root plugin: shell category_id: com.canonical.plainbox::suspend @@ -401,6 +402,7 @@ estimated_duration: 20.0 depends: suspend/suspend_advanced_auto suspend/network_before_suspend _description: Test the network after resuming. 
command: network_wait.sh; gateway_ping_test.py | diff "$PLAINBOX_SESSION_SHARE"/network_before_suspend.txt - +user: root plugin: shell category_id: com.canonical.plainbox::suspend @@ -409,6 +411,7 @@ estimated_duration: 20.0 depends: suspend/suspend_advanced_auto suspend/network_before_suspend _description: Test the network after resuming. command: network_wait.sh; gateway_ping_test.py | diff "$PLAINBOX_SESSION_SHARE"/network_before_suspend.txt - +user: root plugin: shell category_id: com.canonical.plainbox::suspend From 0bc7bfd6e5837d24852ceea2b466b4ad4c560220 Mon Sep 17 00:00:00 2001 From: Fernando Bravo <39527354+fernando79513@users.noreply.github.com> Date: Fri, 23 Feb 2024 13:24:32 +0100 Subject: [PATCH 048/108] Opencv build from source on snaps (New) (#1011) Opencv build from source on snaps --- .../common_files/config/wrapper_common | 1 + .../config/wrapper_common_classic | 1 + .../series22/snap/snapcraft.yaml | 46 ++++++++++++++++++- 3 files changed, 47 insertions(+), 1 deletion(-) diff --git a/checkbox-core-snap/common_files/config/wrapper_common b/checkbox-core-snap/common_files/config/wrapper_common index a48f7a6f35..762414d143 100644 --- a/checkbox-core-snap/common_files/config/wrapper_common +++ b/checkbox-core-snap/common_files/config/wrapper_common @@ -31,6 +31,7 @@ else append_path PYTHONHOME $RUNTIME/usr append_path PYTHONPATH $RUNTIME/usr/lib/python3/dist-packages append_path PYTHONPATH $RUNTIME/lib/python3*/site-packages + append_path PYTHONPATH $RUNTIME/lib/python3*/dist-packages fi # make sure we always know where the content snap is especially for classic diff --git a/checkbox-core-snap/common_files/config/wrapper_common_classic b/checkbox-core-snap/common_files/config/wrapper_common_classic index 5144801a3a..d64634bbf3 100644 --- a/checkbox-core-snap/common_files/config/wrapper_common_classic +++ b/checkbox-core-snap/common_files/config/wrapper_common_classic @@ -42,6 +42,7 @@ else append_path PYTHONHOME $RUNTIME/usr append_path PYTHONPATH 
$RUNTIME/usr/lib/python3/dist-packages append_path PYTHONPATH $RUNTIME/lib/python3*/site-packages + append_path PYTHONPATH $RUNTIME/lib/python3*/dist-packages fi # make sure we always know where the content snap is especially for classic diff --git a/checkbox-core-snap/series22/snap/snapcraft.yaml b/checkbox-core-snap/series22/snap/snapcraft.yaml index 3e91b08bb7..c66238ccdc 100644 --- a/checkbox-core-snap/series22/snap/snapcraft.yaml +++ b/checkbox-core-snap/series22/snap/snapcraft.yaml @@ -330,7 +330,6 @@ parts: - python3-evdev - python3-gi - python3-natsort - - python3-opencv - python3-pil - python3-psutil - python3-pyqrcode @@ -459,6 +458,51 @@ parts: python3 manage.py build python3 manage.py install --layout=relocatable --prefix=/providers/checkbox-provider-tutorial --root="$SNAPCRAFT_PART_INSTALL" after: [checkbox-provider-base] +################################################################################ + opencv: + plugin: make + source: https://github.com/opencv/opencv.git + source-tag: 4.9.0 + override-build: | + cd $SNAPCRAFT_PART_SRC + mkdir build + cd build + cmake -DCMAKE_BUILD_TYPE=RELEASE \ + -DCMAKE_INSTALL_PREFIX=$SNAPCRAFT_PART_INSTALL \ + -DBUILD_LIST=videoio,features2d,highgui,flann,python3 .. 
+ make -j$(nproc) + make install + build-packages: + - build-essential + - cmake + - pkg-config + - libjpeg-dev + - libpng-dev + - libtiff-dev + - libavcodec-dev + - libavformat-dev + - libswscale-dev + - libv4l-dev + - libxvidcore-dev + - libx264-dev + - libgtk-3-dev + - gfortran + - python3-dev + - python3-numpy + stage-packages: + - libjpeg8 + - libpng16-16 + - libtiff5 + - libavcodec58 + - libavformat58 + - libfreetype6 + - libswscale5 + - libv4l-0 + - libxvidcore4 + - libx264-dev + - python3-minimal + - python3.10-minimal + - python3-numpy ################################################################################ gnome-randr: source: https://github.com/maxwellainatchi/gnome-randr-rust.git From bfecdb0f246687314a1e048dcced17e0161e34ce Mon Sep 17 00:00:00 2001 From: Pei Yao-Chang Date: Mon, 26 Feb 2024 09:46:08 +0800 Subject: [PATCH 049/108] Support different kernel repo (BugFix) (#978) * Refactor and support different kernel repo * Separate the lowlatncey kernel checking and add the docstring * Land the unittest for check_prerelease.py * Update providers/base/bin/check_prerelease.py Co-authored-by: Pierre Equoy --------- Co-authored-by: Pierre Equoy --- providers/base/bin/check_prerelease.py | 130 ++++++--- providers/base/tests/test_check_prerelease.py | 259 ++++++++++++++++++ 2 files changed, 356 insertions(+), 33 deletions(-) create mode 100644 providers/base/tests/test_check_prerelease.py diff --git a/providers/base/bin/check_prerelease.py b/providers/base/bin/check_prerelease.py index 68de98252e..d08e455c21 100755 --- a/providers/base/bin/check_prerelease.py +++ b/providers/base/bin/check_prerelease.py @@ -27,7 +27,8 @@ Usage: check-prerelease.py """ - +import logging +import os import platform import shlex import sys @@ -35,64 +36,127 @@ from subprocess import CalledProcessError, check_output -def check_kernel_status(): +def get_apt_cache_information(command: str): + """ Execute the given apt-cache command and return the information. 
+ + This function runs the specified apt-cache command using the `check_output` + function, which returns the information about the Linux kernel package + queried by the command. + + :param command: A string representing the apt-cache command to be executed. + + :return: + A string containing the information retrieved from the apt-cache + command. + + :raises CalledProcessError: + If the apt-cache command returns an empty string with exit code 0, + indicating a non-existent package. + :raises SystemExit: + If the apt-cache command returns an error status, indicating that + the kernel does not match any installed package. + """ + try: + aptinfo = check_output(shlex.split(command), universal_newlines=True) + # "apt-cache showpkg" returns an empty string with exit code 0 if + # called on a non-existent package. + if not aptinfo: + raise CalledProcessError(returncode=1, cmd=command) + return aptinfo + except CalledProcessError as e: + # "apt-cache show" returns an error status if called on a + # non-existent package. + logging.error(e) + logging.error( + "* Kernel does not match any installed package!") + raise SystemExit(1) + + +def verify_apt_cache_showpkg(kernel_release: str): """Check kernel to see if it's supported for certification + by "apt-cache showpkg linux-image-" :returns: True if OK, False if not """ - kernel_release = platform.release() - - retval = True command = "apt-cache showpkg linux-image-{}".format(kernel_release) - aptinfo = check_output(shlex.split(command), universal_newlines=True) - + aptinfo = get_apt_cache_information(command) # Exclude kernels that come from obvious PPAs.... 
+ retval = True if "ppa.launchpad.net" in aptinfo: - print("* Kernel appears to have come from a PPA!") + logging.error("* Kernel appears to have come from a PPA!") retval = False - # Exclude kernels that don't come from the main repo - if "main_binary" not in aptinfo: - print("* Kernel does not come from the main Ubuntu repository!") + # Exclude kernels that don't come from the specific Ubuntu repository + target_repo = os.environ.get("KERNEL_REPO", "main") + if "{}_binary".format(target_repo) not in aptinfo: + logging.error( + "* Kernel does not come from the {} Ubuntu repository!".format( + target_repo)) retval = False + return retval - try: - command = "apt-cache show linux-image-{}".format(kernel_release) - aptinfo = check_output(shlex.split(command), universal_newlines=True) - except CalledProcessError: - # "apt-cache show" returns an error status if called on a - # non-existent package. - print("* Kernel does not match any installed package!") - aptinfo = "" - retval = False + +def verify_apt_cache_show(kernel_release: str): + """Check kernel to see if it's supported for certification + by "apt-cache show linux-image-" + + :returns: + True if OK, False if not + """ + command = "apt-cache show linux-image-{}".format(kernel_release) + aptinfo = get_apt_cache_information(command) + retval = True # Exclude 'edge' kernels, which are identified via the 'Source:' line # in the apt-cache show output.... - if "Source: linux-signed-hwe-edge" in aptinfo: - print("* Kernel is an 'edge' kernel!") - retval = False - if "Source: linux-hwe-edge" in aptinfo: - print("* Kernel is an 'edge' kernel!") - retval = False + for source in ["Source: linux-signed-hwe-edge", "Source: linux-hwe-edge"]: + if source in aptinfo: + logging.error("* Kernel is an 'edge' kernel!, found '{}'") + retval = False # Exclude kernels that aren't from the "linux" (or variant, like # "linux-hwe" or "linux-signed") source.... 
if "Source: linux" not in aptinfo: - print("* Kernel is not a Canonical kernel!") + logging.error("* Kernel is not a Canonical kernel!") retval = False + return retval + + +def verify_not_lowlatency_kernel(kernel_release: str): + """Check kernel to see if it's supported for certification + by verifying the "lowlatency" term not in kernel string + + :returns: + True if OK, False if not + """ # Exclude low-latency kernels, which are identified via the kernel name # string itself.... if "lowlatency" in kernel_release: - print("* Kernel is a low-latency kernel!") - retval = False + logging.error("* Kernel is a low-latency kernel!") + return False + return True - if (not retval): - print("* Kernel release is {}".format(kernel_release)) - print("* Kernel is ineligible for certification!") - return retval +def check_kernel_status(): + """Check kernel to see if it's supported for certification + + :returns: + True if OK, False if not + """ + kernel_release = platform.release() + logging.info("* Kernel release is {}".format(kernel_release)) + + is_valid_kernel = True + is_valid_kernel &= verify_apt_cache_showpkg(kernel_release) + is_valid_kernel &= verify_apt_cache_show(kernel_release) + is_valid_kernel &= verify_not_lowlatency_kernel(kernel_release) + + if not is_valid_kernel: + logging.error("* Kernel is ineligible for certification!") + + return is_valid_kernel def check_os_status(): diff --git a/providers/base/tests/test_check_prerelease.py b/providers/base/tests/test_check_prerelease.py new file mode 100644 index 0000000000..eb13ec2fc8 --- /dev/null +++ b/providers/base/tests/test_check_prerelease.py @@ -0,0 +1,259 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# Written by: +# Patrick Chang +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, +# as published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import logging +import unittest +from unittest.mock import patch +from subprocess import CalledProcessError +from check_prerelease import ( + check_kernel_status, + verify_apt_cache_show, + verify_apt_cache_showpkg, + get_apt_cache_information, + verify_not_lowlatency_kernel +) + + +class TestGetAptCacheInformation(unittest.TestCase): + @patch("check_prerelease.check_output") + def test_get_apt_cache_information_success(self, mock_check_output): + command = "some_apt_cache_command" + expected_output = "some_information" + mock_check_output.return_value = expected_output + result = get_apt_cache_information(command) + self.assertEqual(result, expected_output) + + @patch("check_prerelease.check_output") + def test_get_apt_cache_information_empty_output(self, mock_check_output): + """Test when getting the empty output from apt-cache showpkg + command + """ + command = "some_apt_cache_command" + mock_check_output.return_value = '' + + with self.assertRaises(SystemExit) as context: + get_apt_cache_information(command) + + self.assertEqual(context.exception.code, 1) + + @patch("check_prerelease.check_output") + def test_get_apt_cache_information_nonexistent_package( + self, + mock_check_output + ): + command = "some_apt_cache_command" + mock_check_output.side_effect = CalledProcessError( + returncode=0, cmd=command) + + with self.assertRaises(SystemExit) as context: + get_apt_cache_information(command) + + self.assertEqual(context.exception.code, 1) + + +class TestVerifyNotLowlatencyKernel(unittest.TestCase): + @classmethod + def setUpClass(cls): + logging.disable(logging.CRITICAL) + + @classmethod + def 
tearDownClass(cls): + logging.disable(logging.NOTSET) + + def test_normal_kernel(self): + """ Test when the kernel release does not contain "lowlatency" """ + kernel_release = "4.15.0-76-generic" + result = verify_not_lowlatency_kernel(kernel_release) + self.assertTrue(result) + + def test_lowlatency_kernel(self): + """ Test when the kernel release contains "lowlatency" """ + kernel_release = "4.15.0-76-lowlatency" + result = verify_not_lowlatency_kernel(kernel_release) + self.assertFalse(result) + + +class TestVerifyAptCacheShowpkg(unittest.TestCase): + @classmethod + def setUpClass(cls): + logging.disable(logging.CRITICAL) + + @classmethod + def tearDownClass(cls): + logging.disable(logging.NOTSET) + + @patch("check_prerelease.get_apt_cache_information") + @patch("check_prerelease.os") + def test_verify_apt_cache_showpkg_valid_kernel_from_main_repository( + self, + mock_os, + mock_get_apt_cache_information + ): + test_kernel = "1.2.3-generic" + mock_os.environ.get.return_value = "main" + mock_get_apt_cache_information.return_value = "ubuntu.com_ubuntu_dists_jammy-updates_main_binary-amd64_Packages" # noqa E501 + result = verify_apt_cache_showpkg(test_kernel) + + self.assertTrue(result) + mock_get_apt_cache_information.assert_called_with( + "apt-cache showpkg linux-image-{}".format(test_kernel)) + + @patch("check_prerelease.get_apt_cache_information") + @patch("check_prerelease.os") + def test_verify_apt_cache_showpkg_valid_kernel_from_universe_repository( + self, + mock_os, + mock_get_apt_cache_information + ): + test_kernel = "1.2.3-generic" + mock_os.environ.get.return_value = "universe" + mock_get_apt_cache_information.return_value = "ubuntu.com_ubuntu_dists_jammy-updates_universe_binary-amd64_Packages" # noqa E501 + result = verify_apt_cache_showpkg(test_kernel) + + self.assertTrue(result) + mock_get_apt_cache_information.assert_called_with( + "apt-cache showpkg linux-image-{}".format(test_kernel)) + + @patch("check_prerelease.get_apt_cache_information") + 
@patch("check_prerelease.os") + def test_verify_apt_cache_showpkg_invalid_kernel_from_a_ppa( + self, + mock_os, + mock_get_apt_cache_information + ): + test_kernel = "1.2.3-generic" + mock_os.environ.get.return_value = "main" + mock_get_apt_cache_information.return_value = "ubuntu.com_ubuntu_dists_jammy-updates_main_binary-amd64_Packages\nppa.launchpad.net\n" # noqa E501 + result = verify_apt_cache_showpkg(test_kernel) + + self.assertFalse(result) + mock_get_apt_cache_information.assert_called_with( + "apt-cache showpkg linux-image-{}".format(test_kernel)) + + @patch("check_prerelease.get_apt_cache_information") + @patch("check_prerelease.os") + def test_verify_apt_cache_showpkg_invalid_kernel_from_invalid_repository( + self, + mock_os, + mock_get_apt_cache_information + ): + test_kernel = "1.2.3-generic" + mock_os.environ.get.return_value = "main" + mock_get_apt_cache_information.return_value = "ubuntu.com_ubuntu_dists_jammy-updates_OTHER_binary-amd64_Packages" # noqa E501 + result = verify_apt_cache_showpkg(test_kernel) + + self.assertFalse(result) + mock_get_apt_cache_information.assert_called_with( + "apt-cache showpkg linux-image-{}".format(test_kernel)) + + +class TestVerifyAptCacheShow(unittest.TestCase): + @classmethod + def setUpClass(cls): + logging.disable(logging.CRITICAL) + + @classmethod + def tearDownClass(cls): + logging.disable(logging.NOTSET) + + @patch("check_prerelease.get_apt_cache_information") + def test_verify_apt_cache_show_success( + self, + mock_get_apt_cache_information + ): + mock_get_apt_cache_information.return_value = "Source: linux" + result = verify_apt_cache_show("kernel_test") + self.assertTrue(result) + + @patch("check_prerelease.get_apt_cache_information") + def test_verify_apt_cache_show_non_canonical_kernel( + self, + mock_get_apt_cache_information + ): + mock_get_apt_cache_information.return_value = "Source: other_source" + result = verify_apt_cache_show("kernel_test") + self.assertFalse(result) + + 
@patch("check_prerelease.get_apt_cache_information") + def test_verify_apt_cache_show_edge_kernel( + self, + mock_get_apt_cache_information + ): + mock_get_apt_cache_information.return_value = "Source: linux-signed-hwe-edge" # noqa E501 + result = verify_apt_cache_show("kernel_test") + self.assertFalse(result) + mock_get_apt_cache_information.return_value = "Source: linux-hwe-edge" + result = verify_apt_cache_show("kernel_test") + self.assertFalse(result) + + +class TestCheckKernelStatus(unittest.TestCase): + @classmethod + def setUpClass(cls): + logging.disable(logging.CRITICAL) + + @classmethod + def tearDownClass(cls): + logging.disable(logging.NOTSET) + + @patch("check_prerelease.platform") + @patch("check_prerelease.verify_not_lowlatency_kernel") + @patch("check_prerelease.verify_apt_cache_showpkg") + @patch("check_prerelease.verify_apt_cache_show") + def test_check_kernel_status_valid_kernel( + self, + mock_verify_show, + mock_verify_showpkg, + mock_verify_not_lowlatency_kernel, + mock_platform + ): + test_kernel = "99.98.0-generic" + mock_platform.release.return_value = test_kernel + mock_verify_showpkg.return_value = True + mock_verify_show.return_value = True + mock_verify_not_lowlatency_kernel.return_value = True + + result = check_kernel_status() + + self.assertTrue(result) + mock_verify_showpkg.assert_called_with(test_kernel) + mock_verify_show.assert_called_with(test_kernel) + mock_verify_not_lowlatency_kernel.assert_called_with(test_kernel) + + @patch("check_prerelease.platform") + @patch("check_prerelease.verify_not_lowlatency_kernel") + @patch("check_prerelease.verify_apt_cache_showpkg") + @patch("check_prerelease.verify_apt_cache_show") + def test_check_kernel_status_invalid_kernel( + self, + mock_verify_show, + mock_verify_showpkg, + mock_verify_not_lowlatency_kernel, + mock_platform + ): + test_kernel = "123-generic" + mock_platform.release.return_value = test_kernel + mock_verify_showpkg.return_value = True + mock_verify_show.return_value = 
False + mock_verify_not_lowlatency_kernel.return_value = True + + result = check_kernel_status() + + self.assertFalse(result) + mock_verify_showpkg.assert_called_with(test_kernel) + mock_verify_show.assert_called_with(test_kernel) + mock_verify_not_lowlatency_kernel.assert_called_with(test_kernel) From 0ee92e88b031ecf909b303f5b96d2751fcd29968 Mon Sep 17 00:00:00 2001 From: LiaoU3 <58060146+LiaoU3@users.noreply.github.com> Date: Tue, 27 Feb 2024 16:53:53 +0800 Subject: [PATCH 050/108] [checkbox-ce-oem] Fix wrong arg name (Bugfix) (#1019) Fix wrong arg --- contrib/checkbox-provider-ce-oem/bin/socketcan_busoff_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/checkbox-provider-ce-oem/bin/socketcan_busoff_test.py b/contrib/checkbox-provider-ce-oem/bin/socketcan_busoff_test.py index 9eec6db510..c5a60b9bfb 100755 --- a/contrib/checkbox-provider-ce-oem/bin/socketcan_busoff_test.py +++ b/contrib/checkbox-provider-ce-oem/bin/socketcan_busoff_test.py @@ -167,7 +167,7 @@ def main(): logger = init_logger() if args.debug: logger.setLevel(logging.DEBUG) - can_bus_off_test(args.dev, args.timeout) + can_bus_off_test(args.device, args.timeout) if __name__ == "__main__": From e516218dca5e58bc005e7c60a76313b34de29413 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Wed, 28 Feb 2024 10:13:50 +0100 Subject: [PATCH 051/108] Autoresume crashed/norerun sessions on interactive runs for remote (New) (#1016) * Initial implementation and metabox test * Fix test controller * Clarify test and change its name * Test should_start_via_launcher * Test decorator resumed session Minor: avoid double abandon_session * Test should_start_via_autoresume * Test automatically_resume_... 
* Test start_session function * Test resume_or_start_new_session * Move starting of session inside branches * Avoid crashing on incompatible sessions * Less branches for should_start_via_launcher Co-authored-by: kissiel * Apply Fixed a few typos in docstrings Co-authored-by: kissiel --------- Co-authored-by: kissiel --- .../checkbox_ng/launcher/controller.py | 102 +++++++- .../checkbox_ng/launcher/test_controller.py | 241 +++++++++++++++++- .../metabox/metabox-provider/units/resume.pxu | 16 +- .../scenarios/restart/agent_respawn.py | 42 ++- 4 files changed, 368 insertions(+), 33 deletions(-) diff --git a/checkbox-ng/checkbox_ng/launcher/controller.py b/checkbox-ng/checkbox_ng/launcher/controller.py index 6690dbc796..be5e5df6d5 100644 --- a/checkbox-ng/checkbox_ng/launcher/controller.py +++ b/checkbox-ng/checkbox_ng/launcher/controller.py @@ -41,6 +41,7 @@ from plainbox.impl.result import MemoryJobResult from plainbox.impl.color import Colorizer from plainbox.impl.config import Configuration +from plainbox.impl.session.resume import IncompatibleJobError from plainbox.impl.session.remote_assistant import RemoteSessionAssistant from plainbox.vendor import rpyc from checkbox_ng.resume_menu import ResumeMenu @@ -354,7 +355,83 @@ def quitter(msg): break return self._has_anything_failed - def resume_or_start_new_session(self): + def should_start_via_launcher(self): + """ + Determines if the controller should automatically select a test plan + if given a launcher. 
Raises if the launcher tries to skip the test plan + selection without providing the test plan that must be automatically + selected + """ + tp_forced = self.launcher.get_value("test plan", "forced") + chosen_tp = self.launcher.get_value("test plan", "unit") + if tp_forced and not chosen_tp: + raise SystemExit("The test plan selection was forced but no unit was provided") # split me into lines + return tp_forced + + @contextlib.contextmanager + def _resumed_session(self, session_id): + """ + Used to temporarily resume a session to inspect it, abandoning it + before exiting the context + """ + try: + yield self.sa.resume_session(session_id) + finally: + self.sa.abandon_session() + + def should_start_via_autoresume(self) -> bool: + """ + Determines if the controller should automatically resume a previously + abandoned session. + + A session is automatically resumed if: + - A testplan was selected before abandoning + - A job was in progress when the session was abandoned + - The ongoing test was shell job + """ + try: + last_abandoned_session = next(self.sa.get_resumable_sessions()) + except StopIteration: + # no session to resume + return False + # resume session in agent to be able to peek at the latest job run + # info + # FIXME: IncompatibleJobError is raised if the resume candidate is + # invalid, this is a workaround till get_resumable_sessions is + # fixed + with contextlib.suppress(IncompatibleJobError), self._resumed_session( + last_abandoned_session.id + ) as metadata: + app_blob = json.loads(metadata.app_blob.decode("UTF-8")) + + if not app_blob.get("testplan_id"): + self.sa.abandon_session() + return False + + self.sa.select_test_plan(app_blob["testplan_id"]) + self.sa.bootstrap() + + if not metadata.running_job_name: + return False + + job_state = self.sa.get_job_state(metadata.running_job_name) + if job_state.job.plugin != "shell": + return False + return True + # last resumable session is incompatible + return False + + def 
automatically_start_via_launcher(self): + _ = self.start_session() + tp_unit = self.launcher.get_value("test plan", "unit") + self.select_tp(tp_unit) + self.select_jobs(self.jobs) + + def automatically_resume_last_session(self): + last_abandoned_session = next(self.sa.get_resumable_sessions()) + self.sa.resume_by_id(last_abandoned_session.id) + + def start_session(self): _logger.info("controller: Starting new session.") configuration = dict() configuration["launcher"] = self._launcher_text @@ -366,19 +443,15 @@ def resume_or_start_new_session(self): _logger.warning("Agent is using sideloaded providers") except RuntimeError as exc: raise SystemExit(exc.args[0]) from exc - if self.launcher.get_value("test plan", "forced"): - tp_unit = self.launcher.get_value("test plan", "unit") - if not tp_unit: - _logger.error( - _( - "The test plan selection was forced but no unit was provided" - ) - ) - raise SystemExit(1) - self.select_tp(tp_unit) - self.select_jobs(self.jobs) + return tps + + def resume_or_start_new_session(self): + if self.should_start_via_autoresume(): + self.automatically_resume_last_session() + elif self.should_start_via_launcher(): + self.automatically_start_via_launcher() else: - self.interactively_choose_tp(tps) + self.interactively_choose_tp() self.run_jobs() @@ -492,7 +565,8 @@ def _resume_session(self, resume_params): result_dict["outcome"] = None self.sa.resume_by_id(resume_params.session_id, result_dict) - def interactively_choose_tp(self, tps): + def interactively_choose_tp(self): + tps = self.start_session() _logger.info("controller: Interactively choosing TP.") something_got_chosen = False while not something_got_chosen: diff --git a/checkbox-ng/checkbox_ng/launcher/test_controller.py b/checkbox-ng/checkbox_ng/launcher/test_controller.py index 67fddf1cf7..a60578a1a9 100644 --- a/checkbox-ng/checkbox_ng/launcher/test_controller.py +++ b/checkbox-ng/checkbox_ng/launcher/test_controller.py @@ -20,6 +20,7 @@ import socket from unittest import 
TestCase, mock +from functools import partial from checkbox_ng.urwid_ui import ResumeInstead from checkbox_ng.launcher.controller import RemoteController @@ -245,13 +246,33 @@ def test_restart(self): def test_resume_or_start_new_session_interactive(self): self_mock = mock.MagicMock() - self_mock.sa.sideloaded_providers = True # trigger the warning - # the session is not interactive - self_mock.launcher.get_value.return_value = False + self_mock.should_start_via_autoresume.return_value = False + self_mock.should_start_via_launcher.return_value = False RemoteController.resume_or_start_new_session(self_mock) self.assertTrue(self_mock.interactively_choose_tp.called) + self.assertTrue(self_mock.run_jobs.called) + + def test_resume_or_start_new_session_auto_last_session(self): + self_mock = mock.MagicMock() + self_mock.should_start_via_autoresume.return_value = True + self_mock.should_start_via_launcher.return_value = False + + RemoteController.resume_or_start_new_session(self_mock) + + self.assertTrue(self_mock.automatically_resume_last_session.called) + self.assertTrue(self_mock.run_jobs.called) + + def test_resume_or_start_new_session_auto_launcher(self): + self_mock = mock.MagicMock() + self_mock.should_start_via_autoresume.return_value = False + self_mock.should_start_via_launcher.return_value = True + + RemoteController.resume_or_start_new_session(self_mock) + + self.assertTrue(self_mock.automatically_start_via_launcher.called) + self.assertTrue(self_mock.run_jobs.called) @mock.patch("checkbox_ng.launcher.controller.SimpleUI") def test__run_jobs_description_command_none(self, simple_ui_mock): @@ -778,11 +799,56 @@ def test_resume_session_rerun( }, ) + def test_should_start_via_launcher_true(self): + self_mock = mock.MagicMock() + + def get_value_mock(top_level, attribute): + if top_level == "test plan": + if attribute == "forced": + return True + elif attribute == "unit": + return "tp_unit_id" + return mock.MagicMock() + + self_mock.launcher.get_value = 
get_value_mock + + self.assertTrue(RemoteController.should_start_via_launcher(self_mock)) + + def test_should_start_via_launcher_false(self): + self_mock = mock.MagicMock() + + def get_value_mock(top_level, attribute): + if top_level == "test plan": + if attribute == "forced": + return False + elif attribute == "unit": + return "tp_unit_id" + return mock.MagicMock() + + self_mock.launcher.get_value = get_value_mock + + self.assertFalse(RemoteController.should_start_via_launcher(self_mock)) + + def test_should_start_via_launcher_exit(self): + self_mock = mock.MagicMock() + + def get_value_mock(top_level, attribute): + if top_level == "test plan": + if attribute == "forced": + return True + elif attribute == "unit": + return None + return mock.MagicMock() + + self_mock.launcher.get_value = get_value_mock + with self.assertRaises(SystemExit): + RemoteController.should_start_via_launcher(self_mock) + def test_interactively_choose_tp(self): self_mock = mock.MagicMock() # by default always try to start a new session and not resuming - RemoteController.interactively_choose_tp(self_mock, []) + RemoteController.interactively_choose_tp(self_mock) self.assertTrue(self_mock._new_session_flow.called) self.assertFalse(self_mock._resume_session_menu.called) @@ -792,7 +858,7 @@ def test_interactively_choose_tp_resume(self): self_mock._new_session_flow.side_effect = ResumeInstead self_mock._resume_session_menu.return_value = True - RemoteController.interactively_choose_tp(self_mock, []) + RemoteController.interactively_choose_tp(self_mock) self.assertTrue(self_mock._new_session_flow.called) self.assertTrue(self_mock._resume_session_menu.called) @@ -802,11 +868,174 @@ def test_interactively_choose_tp_resume_retry_tp(self): self_mock._new_session_flow.side_effect = [ResumeInstead, True] self_mock._resume_session_menu.return_value = True - RemoteController.interactively_choose_tp(self_mock, []) + RemoteController.interactively_choose_tp(self_mock) 
self.assertTrue(self_mock._new_session_flow.called) self.assertTrue(self_mock._resume_session_menu.called) + def test__resumed_session(self): + self_mock = mock.MagicMock() + + with RemoteController._resumed_session( + self_mock, "session_id" + ) as metadata: + self.assertEqual( + self_mock.sa.resume_session.return_value, metadata + ) + self.assertTrue(self_mock.sa.resume_session.called) + self.assertTrue(self_mock.sa.abandon_session.called) + + def test_should_start_via_autoresume_true(self): + last_session_mock = mock.MagicMock() + self_mock = mock.MagicMock() + self_mock.sa.get_resumable_sessions.return_value = iter( + [last_session_mock] + ) + + self_mock._resumed_session = partial( + RemoteController._resumed_session, self_mock + ) + metadata = self_mock.sa.resume_session() + metadata.app_blob = b""" + { + "testplan_id" : "testplan_id" + } + """ + metadata.running_job_name = "job_id" + + self_mock.sa.get_job_state.return_value.job.plugin = "shell" + + self.assertTrue( + RemoteController.should_start_via_autoresume(self_mock) + ) + + self.assertTrue(self_mock.sa.select_test_plan.called) + self.assertTrue(self_mock.sa.bootstrap.called) + + def test_should_start_via_autoresume_no_resumable_sessions(self): + self_mock = mock.MagicMock() + self_mock.sa.get_resumable_sessions.return_value = iter( + [] + ) # No resumable sessions + + self.assertFalse( + RemoteController.should_start_via_autoresume(self_mock) + ) + + def test_should_start_via_autoresume_no_testplan_id_in_app_blob(self): + self_mock = mock.MagicMock() + last_session_mock = mock.MagicMock() + self_mock.sa.get_resumable_sessions.return_value = iter( + [last_session_mock] + ) + + self_mock._resumed_session = partial( + RemoteController._resumed_session, self_mock + ) + metadata = self_mock.sa.resume_session() + metadata.app_blob = b"{}" + + self.assertFalse( + RemoteController.should_start_via_autoresume(self_mock) + ) + self.assertTrue(self_mock.sa.abandon_session.called) + + def 
test_should_start_via_autoresume_no_running_job_name(self): + self_mock = mock.MagicMock() + last_session_mock = mock.MagicMock() + self_mock.sa.get_resumable_sessions.return_value = iter( + [last_session_mock] + ) + + self_mock._resumed_session = partial( + RemoteController._resumed_session, self_mock + ) + metadata = self_mock.sa.resume_session() + metadata.app_blob = b'{"testplan_id" : "testplan_id"}' + metadata.running_job_name = "" + + self.assertFalse( + RemoteController.should_start_via_autoresume(self_mock) + ) + + def test_should_start_via_autoresume_job_plugin_not_shell(self): + self_mock = mock.MagicMock() + last_session_mock = mock.MagicMock() + self_mock.sa.get_resumable_sessions.return_value = iter( + [last_session_mock] + ) + + self_mock._resumed_session = partial( + RemoteController._resumed_session, self_mock + ) + metadata = self_mock.sa.resume_session() + metadata.app_blob = b'{"testplan_id" : "testplan_id"}' + metadata.running_job_name = "job_id" + + job_state_mock = mock.MagicMock() + job_state_mock.job.plugin = "user-interact" + self_mock.sa.get_job_state.return_value = job_state_mock + + self.assertFalse( + RemoteController.should_start_via_autoresume(self_mock) + ) + + def test_automatically_start_via_launcher(self): + self_mock = mock.MagicMock() + + RemoteController.automatically_start_via_launcher(self_mock) + + self.assertTrue(self_mock.select_tp.called) + self.assertTrue(self_mock.select_jobs.called) + + def test_automatically_resume_last_session(self): + self_mock = mock.MagicMock() + + RemoteController.automatically_resume_last_session(self_mock) + + self.assertTrue(self_mock.sa.get_resumable_sessions.called) + self.assertTrue(self_mock.sa.resume_by_id.called) + + def test_start_session_success(self): + self_mock = mock.MagicMock() + self_mock._launcher_text = "launcher_example" + self_mock._normal_user = True + expected_configuration = { + "launcher": "launcher_example", + "normal_user": True, + } + + 
self_mock.sa.start_session.return_value = "session_started" + + tps = RemoteController.start_session(self_mock) + + self_mock.sa.start_session.assert_called_once_with( + expected_configuration + ) + self.assertEqual(tps, "session_started") + + def test_start_session_with_sideloaded_providers(self): + self_mock = mock.MagicMock() + self_mock._launcher_text = "launcher_example" + self_mock._normal_user = True + self_mock.sa.sideloaded_providers = True + + self_mock.sa.start_session.return_value = "session_started" + + RemoteController.start_session(self_mock) + + def test_start_session_runtime_error(self): + self_mock = mock.MagicMock() + self_mock._launcher_text = "launcher_example" + self_mock._normal_user = True + self_mock.sa.start_session.side_effect = RuntimeError( + "Failed to start session" + ) + + with self.assertRaises(SystemExit) as _: + RemoteController.start_session(self_mock) + + class IsHostnameALoopbackTests(TestCase): @mock.patch("socket.gethostbyname") @mock.patch("ipaddress.ip_address") diff --git a/metabox/metabox/metabox-provider/units/resume.pxu b/metabox/metabox/metabox-provider/units/resume.pxu index cdbb6fe0d4..c2638b4ead 100644 --- a/metabox/metabox/metabox-provider/units/resume.pxu +++ b/metabox/metabox/metabox-provider/units/resume.pxu @@ -1,20 +1,22 @@ -id: agent-crasher -_summary: Crash the agent +id: checkbox-crasher +_summary: Crash Checkbox flags: simple user: root command: - kill `ps aux|grep run-agent|grep -v 'grep' | awk '{print $2}'` + PID=`ps -o ppid= $$` + kill $PID id: reboot-emulator _summary: Emulate the reboot flags: simple noreturn user: root command: - kill `ps aux|grep run-agent|grep -v 'grep' | awk '{print $2}'` + PID=`ps -o ppid= $$` + kill $PID unit: test plan -id: agent-resume-crash-then-reboot -_name: Agent resume after crash then reboot +id: checkbox-crash-then-reboot +_name: Checkbox crash then reboot include: - agent-crasher + checkbox-crasher reboot-emulator diff --git 
a/metabox/metabox/scenarios/restart/agent_respawn.py b/metabox/metabox/scenarios/restart/agent_respawn.py index c5529c447c..9a45e7db54 100644 --- a/metabox/metabox/scenarios/restart/agent_respawn.py +++ b/metabox/metabox/scenarios/restart/agent_respawn.py @@ -15,13 +15,20 @@ # along with Checkbox. If not, see . import textwrap -from metabox.core.actions import AssertPrinted, AssertRetCode +from metabox.core import keys +from metabox.core.actions import ( + AssertPrinted, + AssertRetCode, + SelectTestPlan, + Send, + Expect, +) from metabox.core.scenario import Scenario from metabox.core.utils import tag -@tag("resume") -class ResumeAfterCrash(Scenario): +@tag("resume", "automatic") +class ResumeAfterCrashAuto(Scenario): modes = ["remote"] launcher = textwrap.dedent( """ @@ -29,7 +36,7 @@ class ResumeAfterCrash(Scenario): launcher_version = 1 stock_reports = text [test plan] - unit = 2021.com.canonical.certification::agent-resume-crash-then-reboot + unit = 2021.com.canonical.certification::checkbox-crash-then-reboot forced = yes [test selection] forced = yes @@ -39,6 +46,29 @@ class ResumeAfterCrash(Scenario): ) steps = [ AssertRetCode(1), - AssertPrinted("job crashed : Crash the agent"), - AssertPrinted("job passed : Emulate the reboot"), + AssertPrinted("job crashed"), + AssertPrinted("Crash Checkbox"), + AssertPrinted("job passed"), + AssertPrinted("Emulate the reboot"), + ] + + +@tag("resume", "manual") +class ResumeAfterCrashManual(Scenario): + modes = ["remote"] + launcher = "# no launcher" + steps = [ + Expect("Select test plan"), + SelectTestPlan( + "2021.com.canonical.certification::checkbox-crash-then-reboot" + ), + Send(keys.KEY_ENTER), + Expect("Press (T) to start"), + Send("T"), + Expect("Select jobs to re-run"), + Send("F"), + Expect("job crashed"), + Expect("Crash Checkbox"), + Expect("job passed"), + Expect("Emulate the reboot"), ] From 21eae29b68ea6b841afa44a14ce156dd9a810212 Mon Sep 17 00:00:00 2001 From: Fernando Bravo 
<39527354+fernando79513@users.noreply.github.com> Date: Wed, 28 Feb 2024 15:33:13 +0100 Subject: [PATCH 052/108] Revert libde265 change on armhf (Bugfix) (#1024) Revert "Strip the executable stack bit on libde265.so.0.1.1, armhf only (BugFix) (#829)" This change is not needed anymore since we are building opencv from source --- checkbox-core-snap/series22/snap/snapcraft.yaml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/checkbox-core-snap/series22/snap/snapcraft.yaml b/checkbox-core-snap/series22/snap/snapcraft.yaml index c66238ccdc..aba2ccf0d2 100644 --- a/checkbox-core-snap/series22/snap/snapcraft.yaml +++ b/checkbox-core-snap/series22/snap/snapcraft.yaml @@ -273,12 +273,6 @@ parts: python3 manage.py validate python3 manage.py build python3 manage.py install --layout=relocatable --prefix=/providers/checkbox-provider-base --root="$SNAPCRAFT_PART_INSTALL" - # Strip the executable stack bit on libde265.so.0.1.1 - # Only armhf requires this modification, arm64 and amd64 both pass the - # automated store validation (i.e review-tools.snap-review) - if [ $SNAPCRAFT_TARGET_ARCH = "armhf" ]; then - execstack --clear-execstack $SNAPCRAFT_PART_INSTALL/usr/lib/*/libde265.so.0.1.1 - fi stage-packages: - bc - bluez-tests @@ -351,7 +345,6 @@ parts: - on arm64: - python3-rpi.gpio # only in focal build-packages: - - execstack - libasound2-dev - libcap-dev organize: From c3126ed586e6d5df10b7cb2c1e52a05057f9de7a Mon Sep 17 00:00:00 2001 From: Pierre Equoy Date: Thu, 29 Feb 2024 16:38:24 +0800 Subject: [PATCH 053/108] Change Tox GitHub workflows naming (Infra) (#1028) * Change Tox GitHub workflows naming Most of our tox-related workflows are named "build", without any name given. Because of this, they cannot be searched easily in GitHub settings, and only "build" appears in the automated checks when submitting a PR. This commit fixes this issue. 
* Change GitHub workflow name for the release tools testing with tox --- .github/workflows/tox-checkbox-ng.yaml | 3 ++- .github/workflows/tox-checkbox-support.yaml | 3 ++- .github/workflows/tox-provider-base.yaml | 3 ++- .github/workflows/tox-provider-certification-client.yaml | 3 ++- .github/workflows/tox-provider-certification-server.yaml | 3 ++- .github/workflows/tox-provider-docker.yaml | 3 ++- .github/workflows/tox-provider-gpgpu.yaml | 3 ++- .github/workflows/tox-provider-iiotg.yaml | 3 ++- .github/workflows/tox-provider-resource.yaml | 3 ++- .github/workflows/tox-provider-sru.yaml | 3 ++- .github/workflows/tox-provider-tpm2.yaml | 3 ++- .github/workflows/tox-tools-release.yaml | 3 ++- .github/workflows/validate_workflows.yaml | 3 ++- 13 files changed, 26 insertions(+), 13 deletions(-) diff --git a/.github/workflows/tox-checkbox-ng.yaml b/.github/workflows/tox-checkbox-ng.yaml index 734a6d9f6b..c11abd5bcc 100644 --- a/.github/workflows/tox-checkbox-ng.yaml +++ b/.github/workflows/tox-checkbox-ng.yaml @@ -12,7 +12,8 @@ on: workflow_dispatch: jobs: - build: + tox_test_checkbox_ng: + name: Test checkbox-ng with tox defaults: run: working-directory: checkbox-ng diff --git a/.github/workflows/tox-checkbox-support.yaml b/.github/workflows/tox-checkbox-support.yaml index fd887f1cf4..0b1d9ef6a2 100644 --- a/.github/workflows/tox-checkbox-support.yaml +++ b/.github/workflows/tox-checkbox-support.yaml @@ -12,7 +12,8 @@ on: workflow_dispatch: jobs: - build: + tox_test_checkbox_support: + name: Test checkbox-support with tox defaults: run: working-directory: checkbox-support diff --git a/.github/workflows/tox-provider-base.yaml b/.github/workflows/tox-provider-base.yaml index 9ebac242e2..006ffcf3e7 100644 --- a/.github/workflows/tox-provider-base.yaml +++ b/.github/workflows/tox-provider-base.yaml @@ -12,7 +12,8 @@ on: workflow_dispatch: jobs: - build: + tox_test_provider_base: + name: Test provider-base with tox defaults: run: working-directory: providers/base diff 
--git a/.github/workflows/tox-provider-certification-client.yaml b/.github/workflows/tox-provider-certification-client.yaml index 12608d6ac4..83bee09fdb 100644 --- a/.github/workflows/tox-provider-certification-client.yaml +++ b/.github/workflows/tox-provider-certification-client.yaml @@ -12,7 +12,8 @@ on: workflow_dispatch: jobs: - build: + tox_test_provider_certification_client: + name: Test provider-certification-client with tox defaults: run: working-directory: providers/certification-client diff --git a/.github/workflows/tox-provider-certification-server.yaml b/.github/workflows/tox-provider-certification-server.yaml index 361291ce57..b8d6f529be 100644 --- a/.github/workflows/tox-provider-certification-server.yaml +++ b/.github/workflows/tox-provider-certification-server.yaml @@ -12,7 +12,8 @@ on: workflow_dispatch: jobs: - build: + tox_test_provider_certification_server: + name: Test provider-certification-server with tox defaults: run: working-directory: providers/certification-server diff --git a/.github/workflows/tox-provider-docker.yaml b/.github/workflows/tox-provider-docker.yaml index 02c6b9c2ff..23b55c0462 100644 --- a/.github/workflows/tox-provider-docker.yaml +++ b/.github/workflows/tox-provider-docker.yaml @@ -12,7 +12,8 @@ on: workflow_dispatch: jobs: - build: + tox_test_provider_docker: + name: Test provider-docker with tox defaults: run: working-directory: providers/docker diff --git a/.github/workflows/tox-provider-gpgpu.yaml b/.github/workflows/tox-provider-gpgpu.yaml index c99d41e047..01ba967ab2 100644 --- a/.github/workflows/tox-provider-gpgpu.yaml +++ b/.github/workflows/tox-provider-gpgpu.yaml @@ -12,7 +12,8 @@ on: workflow_dispatch: jobs: - build: + tox_test_provider_gpgpu: + name: Test provider-gpgpu with tox defaults: run: working-directory: providers/gpgpu diff --git a/.github/workflows/tox-provider-iiotg.yaml b/.github/workflows/tox-provider-iiotg.yaml index cced92efa5..dcea5ca661 100644 --- a/.github/workflows/tox-provider-iiotg.yaml 
+++ b/.github/workflows/tox-provider-iiotg.yaml @@ -12,7 +12,8 @@ on: workflow_dispatch: jobs: - build: + tox_test_provider_iiotg: + name: Test Intel IOTG provider with tox defaults: run: working-directory: providers/iiotg diff --git a/.github/workflows/tox-provider-resource.yaml b/.github/workflows/tox-provider-resource.yaml index b345b2bc4d..f2de8c81c7 100644 --- a/.github/workflows/tox-provider-resource.yaml +++ b/.github/workflows/tox-provider-resource.yaml @@ -12,7 +12,8 @@ on: workflow_dispatch: jobs: - build: + tox_test_provider_resource: + name: Test provider-resource with tox defaults: run: working-directory: providers/resource diff --git a/.github/workflows/tox-provider-sru.yaml b/.github/workflows/tox-provider-sru.yaml index 6375c5239b..fc5630125d 100644 --- a/.github/workflows/tox-provider-sru.yaml +++ b/.github/workflows/tox-provider-sru.yaml @@ -12,7 +12,8 @@ on: workflow_dispatch: jobs: - build: + tox_test_provider_sru: + name: Test provider-sru with tox defaults: run: working-directory: providers/sru diff --git a/.github/workflows/tox-provider-tpm2.yaml b/.github/workflows/tox-provider-tpm2.yaml index 82e9145b25..f487dca44e 100644 --- a/.github/workflows/tox-provider-tpm2.yaml +++ b/.github/workflows/tox-provider-tpm2.yaml @@ -12,7 +12,8 @@ on: workflow_dispatch: jobs: - build: + tox_test_provider_tpm2: + name: Test provider-tpm2 with tox defaults: run: working-directory: providers/tpm2 diff --git a/.github/workflows/tox-tools-release.yaml b/.github/workflows/tox-tools-release.yaml index b9bbedf625..5b540d4820 100644 --- a/.github/workflows/tox-tools-release.yaml +++ b/.github/workflows/tox-tools-release.yaml @@ -12,7 +12,8 @@ on: workflow_dispatch: jobs: - build: + tox_test_release_tools: + name: Test release tools with tox defaults: run: working-directory: tools/release diff --git a/.github/workflows/validate_workflows.yaml b/.github/workflows/validate_workflows.yaml index fe23eaf5c8..b6708001a5 100644 --- 
a/.github/workflows/validate_workflows.yaml +++ b/.github/workflows/validate_workflows.yaml @@ -6,7 +6,8 @@ on: - '.github/workflows/*' jobs: - build: + workflow_validation: + name: Workflow validation runs-on: ubuntu-latest steps: - name: Checkout checkbox monorepo From ec0eac042d92934fa488836cc9b5b351ba24b998 Mon Sep 17 00:00:00 2001 From: Pierre Equoy Date: Thu, 29 Feb 2024 21:54:21 +0800 Subject: [PATCH 054/108] Allow template ids to be used in Test Plans (New) (#1009) * Refactor qualifiers.select_jobs() function The original implementation operates on integers representing indices of a particular job in the list of jobs. It makes the code hard to read for a gain that is not very interesting, since this function is usually called 2 or 3 times during a regular Checkbox run. The following refactor does two things: 1. Operate on the jobs themsleves instead of the integers of the indices pointing to them 2. Remove code duplication by using a nested definition, _handle_vote(), to make the code easier to read * Search for matches in template_id in addition to the job's id field Test plans can now include template ids in addition to job ids. Because it's impossible to know if an item in the test plan is a job or a template, Checkbox populates the list of qualifiers with FieldQualifier instances looking for a match for the `id` field. When select_jobs(job_list, qualifier_list) is called, it will first check if the job id matches the value of the qualifier, as before. If it does not, it will check if it matches the origin of this job (its template_id). If this matches, then it means the FieldQualifier being used should be modified to search for a match in the `template_id` field instead of the `id` field. In order to achieve this, the FieldQualifier.field needs a setter. 
* Add unit tests for select_jobs() when template ids are used * Update Test Plan Unit reference page with the use of template ids * Cleanup Test Plan Unit reference page * Add more unit tests for select_jobs() * Remove useless condition branch * Add comment to explain why a list of included jobs is required --- .../plainbox/impl/secure/qualifiers.py | 74 +++++------ .../plainbox/impl/secure/test_qualifiers.py | 82 ++++++++++++ docs/reference/units/test-plan.rst | 120 +++++++++++------- 3 files changed, 195 insertions(+), 81 deletions(-) diff --git a/checkbox-ng/plainbox/impl/secure/qualifiers.py b/checkbox-ng/plainbox/impl/secure/qualifiers.py index d1ee659e88..7c4ab1f7cf 100644 --- a/checkbox-ng/plainbox/impl/secure/qualifiers.py +++ b/checkbox-ng/plainbox/impl/secure/qualifiers.py @@ -341,6 +341,10 @@ def field(self): """ return self._field + @field.setter + def field(self, value): + self._field = value + @property def matcher(self): """ @@ -463,7 +467,7 @@ def select_jobs(job_list, qualifier_list): # list. The horizontal axis represents jobs from job list. Dots represent # inclusion, X represents exclusion. # - # The result of the select_job() function is a list of jobs that have at + # The result of the select_jobs() function is a list of jobs that have at # least one inclusion and no exclusions. The resulting list is ordered by # increasing qualifier index. # @@ -476,8 +480,7 @@ def select_jobs(job_list, qualifier_list): # another set. # # The second step filters-out all items from the excluded job set from the - # selected job list. For extra efficiency the algorithm operates on - # integers representing the index of a particular job in job_list. + # selected job list. # # The final complexity is O(N x M) + O(M), where N is the number of # qualifiers (flattened) and M is the number of jobs. 
The algorithm assumes @@ -491,10 +494,27 @@ def select_jobs(job_list, qualifier_list): # # As a separate feature, we might return a list of qualifiers that never # matched anything. That may be helpful for debugging. + + # A list is needed to keep the job ordering, while the sets prevent + # duplicates. included_list = [] - id_to_index_map = {job.id: index for index, job in enumerate(job_list)} included_set = set() excluded_set = set() + + def _handle_vote(qualifier, job): + """ + Update list and sets of included/excluded jobs based on their related + qualifiers. + """ + vote = qualifier.get_vote(job) + if vote == IJobQualifier.VOTE_INCLUDE: + if job in included_set: + return + included_set.add(job) + included_list.append(job) + elif vote == IJobQualifier.VOTE_EXCLUDE: + excluded_set.add(job) + for qualifier in flat_qualifier_list: if (isinstance(qualifier, FieldQualifier) and qualifier.field == 'id' and @@ -503,36 +523,18 @@ def select_jobs(job_list, qualifier_list): # optimize the super-common case where a qualifier refers to # a specific job by using the id_to_index_map to instantly # perform the requested operation on a single job - try: - j_index = id_to_index_map[qualifier.matcher.value] - except KeyError: - # The lookup can fail if the pattern is a constant reference to - # a generated job that doens't exist yet. To maintain - # correctness we should just ignore it, as it would not - # match anything yet. 
- continue - job = job_list[j_index] - vote = qualifier.get_vote(job) - if vote == IJobQualifier.VOTE_INCLUDE: - if j_index in included_set: - continue - included_set.add(j_index) - included_list.append(j_index) - elif vote == IJobQualifier.VOTE_EXCLUDE: - excluded_set.add(j_index) - elif vote == IJobQualifier.VOTE_IGNORE: - pass + for job in job_list: + if job.id == qualifier.matcher.value: + _handle_vote(qualifier, job) + break + elif job.template_id == qualifier.matcher.value: + # the qualifier matches the template id this job has been + # instantiated from, need to get the vote for this job + # based on its template_id field, not its id field + qualifier.field = "template_id" + _handle_vote(qualifier, job) else: - for j_index, job in enumerate(job_list): - vote = qualifier.get_vote(job) - if vote == IJobQualifier.VOTE_INCLUDE: - if j_index in included_set: - continue - included_set.add(j_index) - included_list.append(j_index) - elif vote == IJobQualifier.VOTE_EXCLUDE: - excluded_set.add(j_index) - elif vote == IJobQualifier.VOTE_IGNORE: - pass - return [job_list[index] for index in included_list - if index not in excluded_set] + for job in job_list: + _handle_vote(qualifier, job) + return [job for job in included_list + if job not in excluded_set] diff --git a/checkbox-ng/plainbox/impl/secure/test_qualifiers.py b/checkbox-ng/plainbox/impl/secure/test_qualifiers.py index e17779606a..86e09cd023 100644 --- a/checkbox-ng/plainbox/impl/secure/test_qualifiers.py +++ b/checkbox-ng/plainbox/impl/secure/test_qualifiers.py @@ -253,6 +253,10 @@ def test_get_simple_match(self): getattr(job, self._FIELD)) self.assertEqual(result, self.matcher.match()) + def test_field_setter(self): + self.assertEqual(self.qualifier_e.field, self._FIELD) + self.qualifier_e.field = "updated" + self.assertEqual(self.qualifier_e.field, "updated") class RegExpJobQualifierTests(TestCase): """ @@ -490,6 +494,13 @@ class FunctionTests(TestCase): def setUp(self): self.origin = 
mock.Mock(name='origin', spec_set=Origin) + def test_select_jobs__empty_qualifier_list(self): + """ + verify that select_jobs() returns an empty list if no qualifiers are + passed + """ + self.assertEqual(select_jobs([], []), []) + def test_select_jobs__inclusion(self): """ verify that select_jobs() honors qualifier ordering @@ -525,3 +536,74 @@ def test_select_jobs__exclusion(self): self.assertEqual( select_jobs(job_list, [qual_all, qual_not_c]), [job_a, job_b]) + + def test_select_jobs__id_field_qualifier(self): + """ + verify that select_jobs() only returns the job that matches a given + FieldQualifier + """ + job_a = JobDefinition({'id': 'a'}) + job_b = JobDefinition({'id': 'b'}) + job_c = JobDefinition({'id': 'c'}) + matcher = OperatorMatcher(operator.eq, "a") + qual = FieldQualifier("id", matcher, self.origin, True) + job_list = [job_a, job_b, job_c] + expected_list = [job_a] + self.assertEqual(select_jobs(job_list, [qual]), expected_list) + + def test_select_jobs__id_field_qualifier_twice(self): + """ + verify that select_jobs() only returns the job that matches a given + FieldQualifier once, even if it has been added twice + """ + job_a = JobDefinition({'id': 'a'}) + matcher = OperatorMatcher(operator.eq, "a") + qual = FieldQualifier("id", matcher, self.origin, True) + job_list = [job_a, job_a] + expected_list = [job_a] + self.assertEqual(select_jobs(job_list, [qual, qual]), expected_list) + + def test_select_jobs__template_id_field_qualifier(self): + """ + verify that select_jobs() only returns the jobs that have been + instantiated using a given template + """ + job_a = JobDefinition({ + "id": "a", + }) + templated_job_b = JobDefinition({ + "id": "b", + "template-id": "test-template", + }) + templated_job_c = JobDefinition({ + "id": "c", + "template-id": "test-template", + }) + matcher = OperatorMatcher(operator.eq, "test-template") + qual = FieldQualifier("id", matcher, self.origin, True) + job_list = [job_a, templated_job_b, templated_job_c] + 
expected_list = [templated_job_b, templated_job_c] + self.assertEqual(select_jobs(job_list, [qual]), expected_list) + + def test_select_jobs__excluded_templated_job(self): + """ + verify that if a template id is included in the test plan, jobs that + have been instantiated from it can still be excluded from the list of + selected jobs + """ + templated_job_a = JobDefinition({ + "id": "a", + "template-id": "test-template", + }) + templated_job_b = JobDefinition({ + "id": "b", + "template-id": "test-template", + }) + matcher_incl = OperatorMatcher(operator.eq, "test-template") + matcher_excl = OperatorMatcher(operator.eq, "b") + qual_incl = FieldQualifier("id", matcher_incl, self.origin, True) + qual_excl = FieldQualifier("id", matcher_excl, self.origin, False) + job_list = [templated_job_a, templated_job_b] + qualifiers = [qual_incl, qual_excl] + expected_list = [templated_job_a] + self.assertEqual(select_jobs(job_list, qualifiers), expected_list) diff --git a/docs/reference/units/test-plan.rst b/docs/reference/units/test-plan.rst index 53eb459f15..d3b008f52c 100644 --- a/docs/reference/units/test-plan.rst +++ b/docs/reference/units/test-plan.rst @@ -4,25 +4,31 @@ Test Plan Unit ============== -The test plan unit is a facility that describes a sequence of job definitions -that should be executed together. +The test plan unit is a facility that describes a sequence of :ref:`jobs +` or :ref:`templates ` that should be executed together. -Jobs definitions are _selected_ by either listing their identifier or a regular -expression that matches their identifier. Selected jobs are executed in the -sequence they appear in the list, unless they need to be reordered to satisfy -dependencies which always take priority. +Jobs are selected by either listing their identifier or a regular expression +that matches their identifier. Listing a template identifier will select all +the jobs instantiated by it. 
Selected jobs are executed in the sequence they +appear in the list, unless they need to be reordered to satisfy dependencies +which always take priority. It is also possible to :ref:`exclude` jobs from the selection using the same principles. + +Test plans can be :ref:`nested`, so you can +define several smaller test plans and combine them into a bigger one, which +helps with maintenance and flexibility. Test plans can contain additional meta-data which can be used in a graphical -user interface. You can assign a translatable name and -description to each test plan. +user interface. You can assign a translatable name and description to each +test plan. Test plans are also typical units so they can be defined with the familiar -RFC822-like syntax that is also used for job definitions. They can also be -multiple test plan definitions per file, just like with all the other units, -including job definitions. +:ref:`RFC822-like syntax` that is also used for job definitions. There +can also be multiple test plan definitions per file, just like with all the +other units, including job definitions. Test Plan Fields ------------------ +================ The following fields can be used in a test plan. Note that **not all** fields need to be used or even should be used. Please remember that Checkbox needs to @@ -83,36 +89,38 @@ copy such constructs when working on a new test plan from scratch .. _Test Plan include field: ``include``: - A multi-line list of job identifiers or patterns matching such identifiers - that should be included for execution. + A multi-line list of job identifiers, patterns matching such identifiers or + template identifiers that should be included for execution. This is the most important field in any test plan. It basically decides on which job definitions are selected by (included by) the test plan. Separate entries need to be placed on separate lines. White space does not - separate entries as the id field may (sic!) actually include spaces. 
+ separate entries as the ``id`` field may actually include spaces. - You have two options for selecting tests: + There are several options for selecting test cases: - - You can simply list the identifier (either partial or fully qualified) - of the job you want to include in the test plan directly. This is very - common and most test plans used by Checkbox actually look like that. + - You can simply list the identifier (either partial or fully qualified) + of the job you want to include in the test plan directly. This is very + common and most test plans used by Checkbox actually look like that. + - You can also select many test cases at the same time, for instance to + select jobs generated by :ref:`template units`, by: - - You can use regular expressions to select many tests at the same time. - This is the only way to select generated jobs (created by template - units). Please remember that the dot character has a special meaning so - unless you actually want to match *any character* escape the dot with - the backslash character (\\). + - using regular expressions. For example, ``storage-.*`` would select + ``storage-sda``, ``storage-sdb``, etc. depending on the + ``storage-{disk}`` template and the disks on the :term:`DUT`. + - using the template identifier. This will include all the jobs generated + by this template. - Regardless of if you use patterns or literal job identifiers you can use - their fully qualified name (the one that includes the namespace they reside - in) or an abbreviated form. The abbreviated form is applicable for job - definitions that reside in the same namespace (but not necessarily the same - provider) as the provider that is defining the test plan. + Regardless if you use patterns or literal identifiers, you can use their + fully qualified name (the one that includes the :term:`namespace` they + reside in) or an abbreviated form. 
The abbreviated form is applicable for + jobs and templates that reside in the same namespace (but not necessarily + the same provider) as the provider that is defining the test plan. - Plainbox will catch incorrect references to unknown jobs so you should - be relatively safe. Have a look at the examples section below for examples - on how you can refer to jobs from other providers (you simply use their - fully qualified name for that) + Checkbox will catch incorrect references to unknown jobs so you should + be relatively safe. Have a look at the :ref:`test-plan-examples` section + below for examples on how you can refer to jobs from other providers + (you simply use their fully qualified name for that). .. _Test Plan mandatory_include field: @@ -125,16 +133,14 @@ copy such constructs when working on a new test plan from scratch info about the tested system, as it renders impossible to generate a report with no information about system under test. - For example, session results meant to be sent to the Ubuntu certification - website must include the special job: miscellanea/submission-resources - - Example: + For example, session results meant to be sent to the Ubuntu :term:`certification + website` must include the special job ``miscellanea/submission-resources``:: mandatory_include: miscellanea/submission-resources Note that mandatory jobs will always be run first (along with their - dependent jobs) + dependent jobs). .. _Test Plan bootstrap_include field: @@ -144,7 +150,7 @@ copy such constructs when working on a new test plan from scratch bootstrapping sections are the ones generating or helping to generate other jobs. - Example: + Example:: bootstrap_include: graphics/generator_driver_version @@ -260,8 +266,10 @@ copy such constructs when working on a new test plan from scratch mis-estimates from all of the job definitions selected by a particular test plan. +.. 
_test-plan-examples: + Examples --------- +======== A simple test plan that selects several jobs:: @@ -294,17 +302,22 @@ to some of its own definitions:: multipath-io degrade-array-recovery -A test plan that generates jobs using bootstrap_include section:: +A test plan that generates jobs using the ``bootstrap_include`` section:: unit: test plan id: test-plan-with-bootstrapping _name: Tests with a bootstrapping stage _description: - This test plan uses bootstrapping_include field to generate additional - jobs depending on the output of the generator job. - include: .* + This test plan uses the `bootstrap_include` field to generate + additional jobs depending on the output of the `generator` job. The + `include` section points to a template id, so Checkbox will run the + jobs generated by this template, which are: + - generated_job_Foo + - generated_job_Bar bootstrap_include: generator + include: + generated_jobs unit: job id: generator @@ -318,13 +331,30 @@ A test plan that generates jobs using bootstrap_include section:: unit: template template-unit: job template-resource: generator + template-id: generated_jobs plugin: shell estimated_duration: 1 id: generated_job_{my_resource} command: echo {my_resource} _description: Job instantiated from template that echoes {my_resource} - +.. note:: + + Although the method above is recommended, the same result can be + obtained by modifying the ``include`` section of the test plan to use + the regular expression ``generated_job_.*`` instead of the template id + ``generated_jobs``:: + + unit: test plan + id: test-plan-with-bootstrapping + _name: Tests with a bootstrapping stage + _description: + This test plan uses bootstrapping_include field to generate additional + jobs depending on the output of the generator job. 
+ include: + generated_job_.* + bootstrap_include: + generator A test plan that marks some jobs as mandatory:: From ddbc84b487be24b3787429ad22c4df62d3184457 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Thu, 29 Feb 2024 17:35:11 +0100 Subject: [PATCH 055/108] Fix resuming local manual jobs (bugfix) (#1025) * Always note metadata of starting job Minor: Nice message when exiting manually * Less lazy scenario, same meaning * Test _run_single_job_with_ui_loop_quit * Duplicated and outdated test * Changed text on quit Minor: uniform text across remote and local * Updated tests --- .../checkbox_ng/launcher/controller.py | 4 +- checkbox-ng/checkbox_ng/launcher/stages.py | 3 +- .../checkbox_ng/launcher/test_stages.py | 65 +++++++++++++++++++ .../cert_blocker_comment/launcher.py | 48 -------------- metabox/metabox/scenarios/ui/resume_menu.py | 37 +++++------ 5 files changed, 86 insertions(+), 71 deletions(-) create mode 100644 checkbox-ng/checkbox_ng/launcher/test_stages.py diff --git a/checkbox-ng/checkbox_ng/launcher/controller.py b/checkbox-ng/checkbox_ng/launcher/controller.py index be5e5df6d5..c08008b07c 100644 --- a/checkbox-ng/checkbox_ng/launcher/controller.py +++ b/checkbox-ng/checkbox_ng/launcher/controller.py @@ -917,7 +917,7 @@ def _run_jobs(self, jobs_repr, total_num=0): next_job = True elif cmd == "quit": self.sa.remember_users_response(cmd) - raise SystemExit("Session saved, exiting...") + raise SystemExit("Session paused by the user") self.sa.remember_users_response(cmd) self.wait_for_job(dont_finish=True) elif interaction.kind in "steps": @@ -932,7 +932,7 @@ def _run_jobs(self, jobs_repr, total_num=0): next_job = True elif cmd == "quit": self.sa.remember_users_response(cmd) - raise SystemExit("Session saved, exiting...") + raise SystemExit("Session paused by the user") self.sa.remember_users_response(cmd) elif interaction.kind == "verification": self.wait_for_job(dont_finish=True) diff --git a/checkbox-ng/checkbox_ng/launcher/stages.py 
b/checkbox-ng/checkbox_ng/launcher/stages.py index 9a67a3a55c..798e71c971 100644 --- a/checkbox-ng/checkbox_ng/launcher/stages.py +++ b/checkbox-ng/checkbox_ng/launcher/stages.py @@ -99,6 +99,7 @@ def _run_single_job_with_ui_loop(self, job, ui): print(_("ID: {0}").format(job.id)) print(_("Category: {0}").format(job_state.effective_category_id)) comments = "" + self.sa.note_metadata_starting_job({"id" : job.id}, job_state) while True: if job.plugin in ( "user-interact", @@ -180,7 +181,7 @@ def _run_single_job_with_ui_loop(self, job, ui): result_builder.comments = comments break elif cmd == "quit": - raise SystemExit() + raise SystemExit("Session paused by the user") else: result_builder = self.sa.run_job(job.id, ui, False) else: diff --git a/checkbox-ng/checkbox_ng/launcher/test_stages.py b/checkbox-ng/checkbox_ng/launcher/test_stages.py new file mode 100644 index 0000000000..ac900a1852 --- /dev/null +++ b/checkbox-ng/checkbox_ng/launcher/test_stages.py @@ -0,0 +1,65 @@ +# This file is part of Checkbox. +# +# Copyright 2024 Canonical Ltd. +# Written by: +# Massimiliano Girardi +# +# Checkbox is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, +# as published by the Free Software Foundation. +# +# Checkbox is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Checkbox. If not, see . 
+ + +from unittest import TestCase, mock + +from checkbox_ng.launcher.stages import MainLoopStage + + +class TestMainLoopStage(TestCase): + def test__run_single_job_with_ui_loop_quit(self): + self_mock = mock.MagicMock() + job_mock = mock.MagicMock() + ui_mock = mock.MagicMock() + job_state_mock = mock.MagicMock() + + job_mock.id = "job_id" + job_mock.plugin = "user-interact" + self_mock.sa.get_job_state.return_value = job_state_mock + job_state_mock.can_start.return_value = True + + ui_mock.wait_for_interaction_prompt.return_value = "quit" + + with self.assertRaises(SystemExit): + MainLoopStage._run_single_job_with_ui_loop( + self_mock, job_mock, ui_mock + ) + + def test__run_single_job_with_ui_loop_quit_skip_comment(self): + self_mock = mock.MagicMock() + job_mock = mock.MagicMock() + ui_mock = mock.MagicMock() + job_state_mock = mock.MagicMock() + + job_mock.id = "job_id" + job_mock.plugin = "user-interact" + self_mock.sa.get_job_state.return_value = job_state_mock + self_mock.is_interactive = True + job_state_mock.can_start.return_value = True + job_state_mock.effective_certification_status = "not_blocker" + + # Sequence of user actions: first "comment", then "skip" + ui_mock.wait_for_interaction_prompt.side_effect = ["comment", "skip"] + # Simulate user entering a comment after being prompted + with mock.patch('builtins.input', return_value="Test comment"): + result_builder = MainLoopStage._run_single_job_with_ui_loop( + self_mock, job_mock, ui_mock + ) + + self.assertEqual(result_builder.outcome, "skip") diff --git a/metabox/metabox/scenarios/cert_blocker_comment/launcher.py b/metabox/metabox/scenarios/cert_blocker_comment/launcher.py index 6095892fbb..b99a73333e 100644 --- a/metabox/metabox/scenarios/cert_blocker_comment/launcher.py +++ b/metabox/metabox/scenarios/cert_blocker_comment/launcher.py @@ -229,51 +229,3 @@ class UserInteractJobSkippedBeforeRun(Scenario): Send('f' + keys.KEY_ENTER), Expect(_re('(☐|job skipped).*User-interact job')), ] - - 
-@tag("resume") -class ManualJobSkippedWhenResumingSession(Scenario): - """ - Run a test plan with a manual job set to cert-blocker. Save and quit the - session, resume it and make sure it cannot be skipped until a comment is - added. - """ - - modes = ["local"] - launcher = textwrap.dedent( - """ - [launcher] - launcher_version = 1 - stock_reports = text - [test plan] - unit = 2021.com.canonical.certification::cert-blocker-manual-resume - [test selection] - forced = yes - """ - ) - steps = [ - Start(), - Expect("Select test plan"), - Send(keys.KEY_ENTER), - Expect("Pick an action"), - Send("p" + keys.KEY_ENTER), - Expect("save the session and quit"), - Send("q" + keys.KEY_ENTER), - Start(), - Expect("(R) Resume session"), - Send("r"), - Expect("blocker-manual-resume"), - Send(keys.KEY_ENTER), - Send(keys.KEY_DOWN + keys.KEY_ENTER), - Expect( - "Please add a comment to explain why you want to skip it.", - timeout=30, - ), - Expect("Please enter your comments:"), - Send("This is a comment" + keys.KEY_ENTER), - Expect("Pick an action"), - Send(keys.KEY_ENTER), - Expect("Select jobs to re-run"), - Send("f" + keys.KEY_ENTER), - Expect(_re("(☐|job skipped).*A simple manual job")), - ] diff --git a/metabox/metabox/scenarios/ui/resume_menu.py b/metabox/metabox/scenarios/ui/resume_menu.py index 4030d2e6e5..96c9061bc5 100644 --- a/metabox/metabox/scenarios/ui/resume_menu.py +++ b/metabox/metabox/scenarios/ui/resume_menu.py @@ -20,12 +20,11 @@ import metabox.core.keys as keys from metabox.core.actions import Expect, Send, Start, SelectTestPlan from metabox.core.scenario import Scenario -from metabox.core.utils import tag +from metabox.core.utils import tag, _re @tag("manual", "resume") class ResumeMenuMultipleDelete(Scenario): - modes = ["remote"] launcher = textwrap.dedent( """ [launcher] @@ -48,7 +47,7 @@ class ResumeMenuMultipleDelete(Scenario): Send("p" + keys.KEY_ENTER), Expect("Pick an action"), Send("q" + keys.KEY_ENTER), - Expect("Session saved"), + Expect("Session 
paused"), Start(), Expect("Select test plan"), SelectTestPlan( @@ -59,7 +58,7 @@ class ResumeMenuMultipleDelete(Scenario): Send("p" + keys.KEY_ENTER), Expect("Pick an action"), Send("q" + keys.KEY_ENTER), - Expect("Session saved"), + Expect("Session paused"), Start(), Expect("Resume session"), # Enter the resume menu @@ -76,13 +75,12 @@ class ResumeMenuMultipleDelete(Scenario): # Now we still have to be able to run test plans SelectTestPlan("2021.com.canonical.certification::whoami_as_user_tp "), Send(keys.KEY_ENTER), - Expect("Results"), + Expect(_re("(☑|job passed).*Print who is running the job")), ] @tag("manual", "resume") class ResumeMenuMarkSkip(Scenario): - modes = ["remote"] launcher = textwrap.dedent( """ [launcher] @@ -105,7 +103,7 @@ class ResumeMenuMarkSkip(Scenario): Send("p" + keys.KEY_ENTER), Expect("Pick an action"), Send("q" + keys.KEY_ENTER), - Expect("Session saved"), + Expect("Session paused"), Start(), Expect("Resume session"), # Enter the resume menu @@ -122,13 +120,12 @@ class ResumeMenuMarkSkip(Scenario): Expect("Skipped Jobs"), Expect("Finish"), Send("f"), - Expect("Result"), + Expect(_re("(☐|job skipped).*User-interact")), ] @tag("manual", "resume") class ResumeMenuMarkFail(Scenario): - modes = ["remote"] launcher = textwrap.dedent( """ [launcher] @@ -151,7 +148,7 @@ class ResumeMenuMarkFail(Scenario): Send("p" + keys.KEY_ENTER), Expect("Pick an action"), Send("q" + keys.KEY_ENTER), - Expect("Session saved"), + Expect("Session paused"), Start(), Expect("Resume session"), # Enter the resume menu @@ -171,12 +168,12 @@ class ResumeMenuMarkFail(Scenario): Expect("Failed Jobs"), Expect("Finish"), Send("f"), - Expect("Result"), + Expect(_re("(☒|job failed).*User-interact")), ] + @tag("manual", "resume") class ResumeMenuMarkPreCommentFail(Scenario): - modes = ["remote"] launcher = textwrap.dedent( """ [launcher] @@ -199,7 +196,7 @@ class ResumeMenuMarkPreCommentFail(Scenario): Send("p" + keys.KEY_ENTER), Expect("Pick an action"), Send("q" + 
keys.KEY_ENTER), - Expect("Session saved"), + Expect("Session paused"), Start(), Expect("Resume session"), # Enter the resume menu @@ -218,12 +215,12 @@ class ResumeMenuMarkPreCommentFail(Scenario): Expect("Failed Jobs"), Expect("Finish"), Send("f"), - Expect("Result"), + Expect(_re("(☒|job failed).*User-interact")), ] + @tag("manual", "resume") class ResumeMenuMarkPassed(Scenario): - modes = ["remote"] launcher = textwrap.dedent( """ [launcher] @@ -246,7 +243,7 @@ class ResumeMenuMarkPassed(Scenario): Send("p" + keys.KEY_ENTER), Expect("Pick an action"), Send("q" + keys.KEY_ENTER), - Expect("Session saved"), + Expect("Session paused"), Start(), Expect("Resume session"), # Enter the resume menu @@ -256,12 +253,12 @@ class ResumeMenuMarkPassed(Scenario): Expect("last job?"), # Select Mark as Pass Send("p"), - Expect("Result"), + Expect(_re("(☑|job passed).*User-interact")), ] + @tag("manual", "resume") class ResumeMenuResumeLastJob(Scenario): - modes = ["remote"] launcher = textwrap.dedent( """ [launcher] @@ -284,7 +281,7 @@ class ResumeMenuResumeLastJob(Scenario): Send("p" + keys.KEY_ENTER), Expect("Pick an action"), Send("q" + keys.KEY_ENTER), - Expect("Session saved"), + Expect("Session paused"), Start(), Expect("Resume session"), # Enter the resume menu @@ -296,5 +293,5 @@ class ResumeMenuResumeLastJob(Scenario): Send("R"), Expect("press ENTER to continue"), Send(keys.KEY_ENTER), - Expect("Result"), + Expect(_re("(☑|job passed).*User-interact")), ] From 04bc810c4a702dc74b3bd78e3cf6edc818d57f89 Mon Sep 17 00:00:00 2001 From: patliuu <111331153+patliuu@users.noreply.github.com> Date: Fri, 1 Mar 2024 17:24:44 +0800 Subject: [PATCH 056/108] Refactor cpufreq governors jobs (New) (#1014) * Refactor cpufreq governors jobs * Fix unittests error for Python3.5 --- .../bin/cpufreq_governors.py | 705 +++++------ .../tests/test_cpufreq_governors.py | 1125 +++++++++++++++-- 2 files changed, 1387 insertions(+), 443 deletions(-) mode change 100755 => 100644 
contrib/checkbox-provider-ce-oem/tests/test_cpufreq_governors.py diff --git a/contrib/checkbox-provider-ce-oem/bin/cpufreq_governors.py b/contrib/checkbox-provider-ce-oem/bin/cpufreq_governors.py index 41c8bd99e3..6ba8cf3597 100755 --- a/contrib/checkbox-provider-ce-oem/bin/cpufreq_governors.py +++ b/contrib/checkbox-provider-ce-oem/bin/cpufreq_governors.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 import argparse +import contextlib import logging import os import re @@ -42,12 +43,120 @@ def init_logger(): return root_logger -class CPUScalingInfo: - """A class for gathering CPU scaling information.""" +def with_timeout(timeout=10, interval=0.5): + """ + Decorator to set a timeout for a function's execution. + + This decorator allows you to execute a function with a specified timeout + duration. If the function does not return `True` within the given timeout, + the wrapper function returns `False`. The wrapper function sleeps for a + specified interval between each invocation until the timeout expires. + + Args: + - timeout (float, optional): Maximum time duration (in seconds) to wait + for the decorated function to return `True`. Defaults to 10 seconds. + - interval (float, optional): Time interval (in seconds) between + invocations within the timeout duration. Defaults to 0.5 seconds. + + Returns: + - bool: Returns `True` if the decorated function returns `True` within + the specified timeout; otherwise, returns `False`. + """ + def decorator(func): + def func_wrapper(*args, **kwargs): + start_time = time.time() + while time.time() - start_time < timeout: + if func(*args, **kwargs): + return True + time.sleep(interval) + return False + + return func_wrapper + + return decorator + + +def probe_governor_module(expected_governor): + """ + Attempt to probe and load a specific CPU frequency governor module. + + Args: + - expected_governor (str): The name of the CPU frequency governor module + to probe and load. 
+ + Raises: + - subprocess.CalledProcessError: If the 'modprobe' command encounters an + error during the module loading process. + """ + logging.warning( + "Seems CPU frequency governors %s are not enable yet.", + expected_governor, + ) + module = "cpufreq_{}".format(expected_governor) + logging.info("Attempting to probe %s ...", module) + cmd = ["modprobe", module] + try: + subprocess.check_call( + cmd, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + universal_newlines=True, + ) + logging.info("Probe module Successfully!") + except subprocess.CalledProcessError as err: + logging.error(err) + logging.error("%s governor not supported", expected_governor) + sys.exit(1) + + +def stress_cpus() -> List[subprocess.Popen]: + """ + Stress the CPU cores by running multiple dd processes. + + Returns: + subprocess.Popen: A list of Popen objects representing the + dd processes spawned for each CPU core. + """ + cpus_count = cpu_count() + + cmd = ["dd", "if=/dev/zero", "of=/dev/null"] + processes = [subprocess.Popen(cmd) for _ in range(cpus_count)] + return processes + + +def stop_stress_cpus(processes): + """ + Stop the CPU stress by terminating the specified dd processes. + + Args: + processes (List[subprocess.Popen]): A list of Popen objects + representing the dd processes. + """ + for p in processes: + p.terminate() + p.wait() + + +@contextlib.contextmanager +def context_stress_cpus(): + """ + Context manager to stress CPU cores using multiple dd processes. + """ + try: + logging.info("Stressing CPUs...") + processes = stress_cpus() + yield + finally: + logging.info("Stop stressing CPUs...") + stop_stress_cpus(processes) + + +class CPUScalingHandler: + """A class for getting and setting CPU scaling information.""" def __init__(self, policy=0): """ - Initialize the CPUScalingInfo object. + Initialize the CPUScalingHandler object. Args: policy (int): The CPU policy number to be used (default is 0). 
@@ -219,6 +328,17 @@ def get_max_frequency(self) -> int: frequency = self.get_policy_attribute("scaling_max_freq") return int(frequency) if frequency else 0 + def get_current_frequency(self) -> int: + """ + Get the current CPU frequency for the current policy. + + Returns: + int: The current CPU frequency in kHz. + """ + frequency = self.get_policy_attribute("scaling_cur_freq") + logging.debug("Current CPU frequency: %s", frequency) + return int(frequency) if frequency else 0 + def get_affected_cpus(self) -> List: """ Get the list of affected CPUs for the current policy. @@ -260,6 +380,60 @@ def set_governor(self, governor) -> bool: """ return self.set_policy_attribute("scaling_governor", governor) + @contextlib.contextmanager + def context_set_governor(self, governor): + """ + Context manager to temporarily set a CPU frequency governor and + then restores the original governor. + + Args: + - governor (str): The CPU frequency governor to set within the context. + + Raises: + - SystemExit: If setting the governor fails during setup or teardown. + """ + try: + if not self.set_policy_attribute("scaling_governor", governor): + sys.exit(1) + yield + finally: + logging.debug("-----------------TEARDOWN-----------------") + logging.debug( + "Restoring original governor to %s", + self.original_governor, + ) + if not self.set_policy_attribute( + "scaling_governor", self.original_governor + ): + sys.exit(1) + + @contextlib.contextmanager + def context_set_frequency(self, frequency): + """ + Context manager to temporarily set a CPU frequency and + then restores the orignal frequency. + + Args: + - frequency (str or int): The CPU frequency to set within the context. + + Raises: + - SystemExit: If setting the frequency fails during setup or teardown. 
+ + """ + try: + original_frequency = self.get_current_frequency() + if not self.set_frequency(frequency): + sys.exit(1) + yield + finally: + logging.debug("-----------------TEARDOWN-----------------") + logging.debug( + "Restoring original frequency to %s", + original_frequency, + ) + if not self.set_frequency(original_frequency): + sys.exit(1) + def set_frequency(self, frequency) -> bool: """ Set the CPU frequency for the current policy. @@ -286,33 +460,7 @@ def __init__(self, policy=0): policy (int): The CPU policy number to be used (default is 0). """ self.policy = policy - self.info = CPUScalingInfo(policy=self.policy) - - def stress_cpus(self) -> subprocess.Popen: - """ - Stress the CPU cores by running multiple dd processes. - - Returns: - subprocess.Popen: A list of Popen objects representing the - dd processes spawned for each CPU core. - """ - cpus_count = cpu_count() - - cmd = ["dd", "if=/dev/zero", "of=/dev/null"] - processes = [subprocess.Popen(cmd) for _ in range(cpus_count)] - return processes - - def stop_stress_cpus(self, processes): - """ - Stop the CPU stress by terminating the specified dd processes. - - Args: - processes (List[subprocess.Popen]): A list of Popen objects - representing the dd processes. 
- """ - for p in processes: - p.terminate() - p.wait() + self.handler = CPUScalingHandler(policy=self.policy) def print_policy_info(self): """ @@ -320,26 +468,26 @@ def print_policy_info(self): """ logging.info("## CPUfreq Policy%s Info ##", self.policy) logging.info("Affected CPUs:") - if not self.info.governors: + if not self.handler.governors: logging.info(" None") else: - for cpu in self.info.affected_cpus: + for cpu in self.handler.affected_cpus: logging.info(" cpu%s", cpu) logging.info( "Supported CPU Frequencies: %s - %s MHz", - self.info.min_freq / 1000, - self.info.max_freq / 1000, + self.handler.min_freq / 1000, + self.handler.max_freq / 1000, ) logging.info("Supported Governors:") - if not self.info.governors: + if not self.handler.governors: logging.info(" None") else: - for governor in self.info.governors: + for governor in self.handler.governors: logging.info(" %s", governor) - logging.info("Current Governor: %s", self.info.original_governor) + logging.info("Current Governor: %s", self.handler.original_governor) def test_driver_detect(self) -> bool: """ @@ -353,12 +501,12 @@ def test_driver_detect(self) -> bool: bool: True if the drivers are printed successfully, False otherwise. """ - if not self.info.cpu_policies: + if not self.handler.cpu_policies: return False drivers = [] - for policy in self.info.cpu_policies: - driver = self.info.get_scaling_driver(policy) - if driver not in drivers: + for policy in self.handler.cpu_policies: + driver = self.handler.get_scaling_driver(policy) + if driver and driver not in drivers: drivers.append(driver) if not drivers: return False @@ -366,6 +514,131 @@ def test_driver_detect(self) -> bool: print("scaling_driver: {}".format(" ".join(drivers))) return True + @with_timeout() + def is_frequency_equal_to_target(self, target) -> bool: + """ + Check if the current CPU frequency matches the target frequency. + + Args: + - target (str or int): The target CPU frequency to compare against. 
+ + Returns: + - bool: Returns True if the current frequency matches the target + frequency; otherwise, returns False. + """ + curr_freq = self.handler.get_current_frequency() + return curr_freq == target + + @with_timeout() + def is_frequency_settled_down(self) -> bool: + """ + Check if the current CPU frequency has settled down below the maximum. + + Returns: + - bool: Returns True if the current frequency is below the maximum; + otherwise, returns False. + """ + curr_freq = self.handler.get_current_frequency() + return curr_freq < self.handler.max_freq + + def test_frequency_influence(self, governor, target_freq=None) -> bool: + """ + Test the influence of CPU frequency based on the provided governor. + + This function tests the influence of CPU frequency settings by + setting different governors and verifying if the CPU frequency + behaves as expected. + + Args: + - governor (str): The CPU frequency governor to test. + - target_freq (int, optional): The target CPU frequency for the + 'userspace' governor. Defaults to None. + + Returns: + - bool: Returns True if all verification checks pass; + otherwise, returns False. + + Raises: + - SystemExit: If an unsupported governor is provided. 
+ """ + frequencies_mapping = { + "performance": (self.handler.max_freq, "Max."), + "powersave": (self.handler.min_freq, "Min."), + "ondemand": (self.handler.max_freq, "Max."), + "conservative": (self.handler.max_freq, "Max."), + "schedutil": (self.handler.max_freq, "Max."), + } + success = True + with self.handler.context_set_governor(governor): + if governor in ["ondemand", "conservative", "schedutil"]: + with context_stress_cpus(): + if self.is_frequency_equal_to_target( + target=frequencies_mapping[governor][0] + ): + logging.info( + "Verified current CPU frequency is equal to " + "%s frequency %s MHz", + frequencies_mapping[governor][1], + (frequencies_mapping[governor][0] / 1000), + ) + else: + success = False + logging.error( + "Could not verify that cpu frequency is equal to " + "%s frequency %s MHz", + frequencies_mapping[governor][1], + (frequencies_mapping[governor][0] / 1000), + ) + if self.is_frequency_settled_down(): + logging.info( + "Verified current CPU frequency has settled to a " + "lower frequency" + ) + else: + success = False + logging.error( + "Could not verify that cpu frequency has settled to a " + "lower frequency" + ) + elif governor == "userspace": + with self.handler.context_set_frequency(target_freq): + if self.is_frequency_equal_to_target( + target=target_freq, + ): + logging.info( + "Verified current CPU frequency is equal to " + "frequency %s MHz", + (target_freq / 1000), + ) + else: + success = False + logging.error( + "Could not verify that cpu frequency is equal to " + "frequency %s MHz", + (target_freq / 1000), + ) + elif governor in ["performance", "powersave"]: + if self.is_frequency_equal_to_target( + target=frequencies_mapping[governor][0], + ): + logging.info( + "Verified current CPU frequency is close to " + "%s frequency %s MHz", + frequencies_mapping[governor][1], + (frequencies_mapping[governor][0] / 1000), + ) + else: + success = False + logging.error( + "Could not verify that cpu frequency has close to " + "frequency 
%s MHz", + frequencies_mapping[governor][1], + (frequencies_mapping[governor][0] / 1000), + ) + else: + sys.exit("Governor '{}' not supported".format(governor)) + return success + def test_userspace(self) -> bool: """ Run the Userspace Governor Test. @@ -374,54 +647,17 @@ def test_userspace(self) -> bool: bool: True if the test passes, False otherwise. """ logging.info("-------------------------------------------------") - logging.info("Running Userspace Governor Test") - success = True - governor = "userspace" - if governor not in self.info.governors: - if not self.probe_governor_module(governor): - return False - - logging.info("Setting governor to %s", governor) - if not self.info.set_governor(governor): - success = False - - # Set freq to minimum, verify - frequency = self.info.min_freq logging.info( - "Setting CPU frequency to %u MHz", (int(frequency) / 1000) + "Running Userspace Governor Test on CPU policy%s", self.policy ) - if not self.info.set_frequency(frequency): - success = False - - curr_freq = int(self.info.get_policy_attribute("scaling_cur_freq")) - if not curr_freq or (self.info.min_freq != curr_freq): - logging.error( - "Could not verify that cpu frequency is set to the minimum" - " value of %s", - self.info.min_freq, - ) - success = False - - # Set freq to maximum, verify - frequency = self.info.max_freq - logging.info( - "Setting CPU frequency to %u MHz", (int(frequency) / 1000) + governor = "userspace" + return self.test_frequency_influence( + governor, + self.handler.max_freq, + ) and self.test_frequency_influence( + governor, + self.handler.min_freq, ) - if not self.info.set_frequency(frequency): - success = False - - curr_freq = int(self.info.get_policy_attribute("scaling_cur_freq")) - if not curr_freq or (self.info.max_freq != curr_freq): - logging.error( - "Could not verify that cpu frequency is set to the minimum" - " value of %s", - self.info.max_freq, - ) - success = False - - if success: - logging.info("Userspace Governor Test: PASS") - 
return success def test_performance(self) -> bool: """ @@ -431,36 +667,11 @@ def test_performance(self) -> bool: bool: True if the test passes, False otherwise. """ logging.info("-------------------------------------------------") - logging.info("Running Performance Governor Test") - success = True - governor = "performance" - if governor not in self.info.governors: - if not self.probe_governor_module(governor): - return False - - logging.info("Setting governor to %s", governor) - if not self.info.set_governor(governor): - success = False - - curr_freq = int(self.info.get_policy_attribute("scaling_cur_freq")) - logging.debug( - "Verifying current CPU frequency %s is close to max frequency", - curr_freq, + logging.info( + "Running Performance Governor Test on CPU policy%s", self.policy ) - if not curr_freq or ( - float(curr_freq) < 0.99 * float(self.info.max_freq) - ): - logging.error( - "Current cpu frequency of %s is not close enough to the " - "maximum value of %s", - curr_freq, - self.info.max_freq, - ) - success = False - - if success: - logging.info("Performance Governor Test: PASS") - return success + governor = "performance" + return self.test_frequency_influence(governor) def test_powersave(self) -> bool: """ @@ -470,36 +681,11 @@ def test_powersave(self) -> bool: bool: True if the test passes, False otherwise. 
""" logging.info("-------------------------------------------------") - logging.info("Running Powersave Governor Test") - success = True - governor = "powersave" - if governor not in self.info.governors: - if not self.probe_governor_module(governor): - return False - - logging.info("Setting governor to %s", governor) - if not self.info.set_governor(governor): - success = False - - curr_freq = int(self.info.get_policy_attribute("scaling_cur_freq")) - logging.debug( - "Verifying current CPU frequency %s is close to min frequency", - curr_freq, + logging.info( + "Running Powersave Governor Test on CPU policy%s", self.policy ) - if not curr_freq or ( - float(curr_freq) * 0.99 > float(self.info.min_freq) - ): - logging.error( - "Current cpu frequency of %s is not close enough to the " - "minimum value of %s", - curr_freq, - self.info.min_freq, - ) - success = False - - if success: - logging.info("Powersave Governor Test: PASS") - return success + governor = "powersave" + return self.test_frequency_influence(governor) def test_ondemand(self) -> bool: """ @@ -512,62 +698,8 @@ def test_ondemand(self) -> bool: logging.info( "Running Ondemand Governor Test on CPU policy%s", self.policy ) - success = True governor = "ondemand" - if governor not in self.info.governors: - if not self.probe_governor_module(governor): - return False - - logging.info("Setting governor to %s", governor) - if not self.info.set_governor(governor): - success = False - - logging.info("Stressing CPUs...") - stress_process = self.stress_cpus() - time.sleep(5) - - curr_freq = int(self.info.get_policy_attribute("scaling_cur_freq")) - logging.debug("Current CPU frequency: %s MHz", (curr_freq / 1000)) - if ( - not self.info.max_freq - or not curr_freq - or (self.info.max_freq != curr_freq) - ): - logging.error( - "Could not verify that cpu frequency has increased to the " - "maximum value" - ) - success = False - else: - logging.info( - "Verified current CPU frequency is equal to the max frequency" - ) - - 
logging.info("Stop stressing CPUs...") - self.stop_stress_cpus(stress_process) - time.sleep(8) - - curr_freq = int(self.info.get_policy_attribute("scaling_cur_freq")) - logging.debug("Current CPU frequency: %s MHz", (curr_freq / 1000)) - if ( - not self.info.min_freq - or not curr_freq - or (self.info.max_freq <= curr_freq) - ): - logging.error( - "Could not verify that cpu frequency has settled to a " - "lower frequency" - ) - success = False - else: - logging.info( - "Verified current CPU frequency has settled to a " - "lower frequency" - ) - - if success: - logging.info("Ondemand Governor Test: PASS") - return success + return self.test_frequency_influence(governor) def test_conservative(self) -> bool: """ @@ -580,62 +712,8 @@ def test_conservative(self) -> bool: logging.info( "Running Conservative Governor Test on CPU policy%s", self.policy ) - success = True governor = "conservative" - if governor not in self.info.governors: - if not self.probe_governor_module(governor): - return False - - logging.info("Setting governor to %s", governor) - if not self.info.set_governor(governor): - success = False - - logging.info("Stressing CPUs...") - stress_process = self.stress_cpus() - time.sleep(5) - - curr_freq = int(self.info.get_policy_attribute("scaling_cur_freq")) - logging.debug("Current CPU frequency: %s MHz", (curr_freq / 1000)) - if ( - not self.info.max_freq - or not curr_freq - or (self.info.max_freq != curr_freq) - ): - logging.error( - "Could not verify that cpu frequency has increased to the " - "maximum value" - ) - success = False - else: - logging.info( - "Verified current CPU frequency is equal to the max frequency" - ) - - logging.info("Stop stressing CPUs...") - self.stop_stress_cpus(stress_process) - time.sleep(8) - - curr_freq = int(self.info.get_policy_attribute("scaling_cur_freq")) - logging.debug("Current CPU frequency: %s MHz", (curr_freq / 1000)) - if ( - not self.info.min_freq - or not curr_freq - or (self.info.max_freq <= curr_freq) - ): - 
logging.error( - "Could not verify that cpu frequency has settled to a " - "lower frequency" - ) - success = False - else: - logging.info( - "Verified current CPU frequency has settled to a " - "lower frequency" - ) - - if success: - logging.info("Conservative Governor Test: PASS") - return success + return self.test_frequency_influence(governor) def test_schedutil(self) -> bool: """ @@ -648,98 +726,8 @@ def test_schedutil(self) -> bool: logging.info( "Running Schedutil Governor Test on CPU policy%s", self.policy ) - success = True governor = "schedutil" - if governor not in self.info.governors: - if not self.probe_governor_module(governor): - return False - - logging.info("Setting governor to %s", governor) - if not self.info.set_governor(governor): - success = False - - logging.info("Stressing CPUs...") - stress_process = self.stress_cpus() - time.sleep(5) - - curr_freq = int(self.info.get_policy_attribute("scaling_cur_freq")) - logging.debug("Current CPU frequency: %s MHz", (curr_freq / 1000)) - if ( - not self.info.max_freq - or not curr_freq - or (self.info.max_freq != curr_freq) - ): - logging.error( - "Could not verify that cpu frequency has increased to the " - "maximum value" - ) - success = False - else: - logging.info( - "Verified current CPU frequency is equal to the max frequency" - ) - - logging.info("Stop stressing CPUs...") - self.stop_stress_cpus(stress_process) - time.sleep(8) - - curr_freq = int(self.info.get_policy_attribute("scaling_cur_freq")) - logging.debug("Current CPU frequency: %s MHz", (curr_freq / 1000)) - if ( - not self.info.min_freq - or not curr_freq - or (self.info.max_freq <= curr_freq) - ): - logging.error( - "Could not verify that cpu frequency has settled to a " - "lower frequency" - ) - success = False - else: - logging.info( - "Verified current CPU frequency has settled to a " - "lower frequency" - ) - - if success: - logging.info("Schedutil Governor Test: PASS") - return success - - def restore_governor(self): - """ - 
Restore the CPU governor to the original value. - - This method sets the CPU governor to the original governor value - stored during initialization. - """ - logging.info("-------------------------------------------------") - logging.info( - "Restoring original governor to %s", - self.info.original_governor - ) - self.info.set_governor(self.info.original_governor) - - def probe_governor_module(self, expected_governor): - logging.info("Seems CPU frequency governors %s are not" - " enable yet.", expected_governor) - module = ("cpufreq_{}".format(expected_governor)) - logging.info("Attempting to probe %s ...", module) - cmd = ["modprobe", module] - try: - subprocess.run( - cmd, - check=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True, - encoding="utf-8" - ) - logging.info("Probe module Successfully!") - return True - except subprocess.CalledProcessError as err: - logging.error(err) - logging.error("%s governor not supported", expected_governor) - return False + return self.test_frequency_influence(governor) def main(): @@ -754,9 +742,6 @@ def main(): --driver-detect: Print the CPU scaling driver. --policy: Run the test on a specific CPU policy (default is policy 0). --governor: Run a specific governor test. - - Returns: - int: The exit code of the test execution, 0 if successful, 1 otherwise. 
""" parser = argparse.ArgumentParser() parser.add_argument( @@ -779,7 +764,7 @@ def main(): "--policy", dest="policy", help="Run test on specific policy", - default="0", + default=0, ) parser.add_argument( "--governor", @@ -792,27 +777,25 @@ def main(): if args.debug: logger.setLevel(logging.DEBUG) - info = CPUScalingInfo() + handler = CPUScalingHandler() if args.policy_resource: - info.print_policies_list() - return 0 + handler.print_policies_list() + sys.exit(0) test = CPUScalingTest(policy=args.policy) if args.driver_detect: - return 0 if test.test_driver_detect() else 1 + sys.exit(0) if test.test_driver_detect() else sys.exit(1) - exit_code = 0 try: test.print_policy_info() + if args.governor not in handler.governors: + probe_governor_module(args.governor) if not getattr(test, "test_{}".format(args.governor))(): - exit_code = 1 + sys.exit(1) except AttributeError: - logging.exception("Given governor is not supported") - return 1 - - test.restore_governor() - return exit_code + logging.error("Given governor is not supported") + sys.exit(1) if __name__ == "__main__": - sys.exit(main()) + main() diff --git a/contrib/checkbox-provider-ce-oem/tests/test_cpufreq_governors.py b/contrib/checkbox-provider-ce-oem/tests/test_cpufreq_governors.py old mode 100755 new mode 100644 index c4e2e97a84..69e35b5af4 --- a/contrib/checkbox-provider-ce-oem/tests/test_cpufreq_governors.py +++ b/contrib/checkbox-provider-ce-oem/tests/test_cpufreq_governors.py @@ -1,156 +1,1117 @@ -#!/usr/bin/python3 +#!/usr/bin/env python3 import unittest import subprocess -import io import logging -from unittest import mock -""" -We probably could remove append path while mirge back to ppc. -Since checkbox has __init__.py for unit tests. 
-ref: -https://github.com/canonical/checkbox/blob/main/checkbox-support/checkbox_support/tests/__init__.py -""" import sys -import os -# Add the path to the 'bin' directory for the import to work -sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'bin')) -from cpufreq_governors import CPUScalingInfo, CPUScalingTest +from io import StringIO +from unittest.mock import patch, mock_open, Mock, MagicMock +from cpufreq_governors import ( + CPUScalingHandler, + CPUScalingTest, + init_logger, + probe_governor_module, + stress_cpus, + stop_stress_cpus, + context_stress_cpus, + main, +) -class TestCPUScalingTest(unittest.TestCase): - @mock.patch('cpufreq_governors.CPUScalingInfo', - return_value=None) - def setUp(self, mock_cpuscalinginfo): - suppress_text = io.StringIO() + +class TestInitLogger(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.original_stdout = sys.stdout + suppress_text = StringIO() sys.stdout = suppress_text logging.disable(logging.CRITICAL) - # Create an instance of CPUScalingTest - self.cpu_scaling_test = CPUScalingTest() - - @mock.patch('subprocess.run') - def test_probe_governor_module_success(self, mock_subprocess_run): - # Simulate a scenario governor module probe successfully. - governor = "test_governor" - status = self.cpu_scaling_test.probe_governor_module( - governor - ) - mock_subprocess_run.returncode = 0 - self.assertLogs("Probe module Successfully!") - self.assertTrue(status) - - @mock.patch('subprocess.run') - def test_probe_governor_module_fail(self, mock_subprocess_run): - # Simulate a scenario where the governors module probed fail. 
- # Create a mock subprocess.CompletedProcess object with a - # return code of SystemError - governor = "test_governor" - cmd = ["modprobe", governor] - mock_subprocess_run.side_effect = subprocess.CalledProcessError( - returncode=1, - cmd=cmd, - ) - status = self.cpu_scaling_test.probe_governor_module(governor) - self.assertLogs("governor not supported") - self.assertFalse(status) + + def setUp(self): + # Save the original logging configuration + self.original_log_config = logging.getLogger().handlers def tearDown(self): - # release stdout - sys.stdout = sys.__stdout__ + # Restore the original logging configuration after each test + logging.getLogger().handlers = self.original_log_config + + @classmethod + def tearDownClass(cls): + sys.stdout = cls.original_stdout logging.disable(logging.NOTSET) + @patch("sys.stdout", new_callable=StringIO) + @patch("sys.stderr", new_callable=StringIO) + def test_logger_configuration(self, mock_stderr, mock_stdout): + logger = init_logger() + + # Test if the logger is an instance of logging.Logger + self.assertIsInstance(logger, logging.Logger) + + # Test if there are three handlers attached to the logger + # (including the default handler) + self.assertEqual(len(logger.handlers), 3) + + # Test if the logger level is set to INFO + self.assertEqual(logger.level, logging.INFO) + + @patch("sys.stdout", new_callable=StringIO) + @patch("sys.stderr", new_callable=StringIO) + def test_logging_levels(self, mock_stderr, mock_stdout): + logger = init_logger() -class TestCPUScalingInfo(unittest.TestCase): - @mock.patch('cpufreq_governors.CPUScalingInfo.__init__', - return_value=None) - def setUp(self, - mock_init): - suppress_text = io.StringIO() + # Test if the stdout handler has the correct level + stdout_handler = next( + handler + for handler in logger.handlers + if isinstance(handler, logging.StreamHandler) + and handler.stream == sys.stdout + ) + self.assertEqual(stdout_handler.level, logging.DEBUG) + + # Test if the stderr handler has 
the correct level + stderr_handler = next( + handler + for handler in logger.handlers + if isinstance(handler, logging.StreamHandler) + and handler.stream == sys.stderr + ) + self.assertEqual(stderr_handler.level, logging.WARNING) + + +class TestProbeGovernorModule(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.original_stdout = sys.stdout + suppress_text = StringIO() sys.stdout = suppress_text logging.disable(logging.CRITICAL) - CPUScalingInfo.__init__ = mock_init - # Create an instance of CPUScalingInfo - self.cpu_scaling_info = CPUScalingInfo() + + @classmethod + def tearDownClass(cls): + sys.stdout = cls.original_stdout + logging.disable(logging.NOTSET) + + @patch("subprocess.check_call") + @patch("sys.exit") + @patch("logging.info") + def test_probe_governor_module_success( + self, mock_logging_info, mock_sys_exit, mock_subprocess_check_call + ): + mock_subprocess_check_call.return_value = None + + probe_governor_module("ondemand") + + mock_logging_info.assert_called_with("Probe module Successfully!") + # Ensure sys.exit was not called + mock_sys_exit.assert_not_called() + + @patch("subprocess.check_call") + @patch("sys.exit") + @patch("logging.error") + def test_probe_governor_module_error( + self, mock_logging_error, mock_sys_exit, mock_subprocess_check_call + ): + mock_subprocess_check_call.side_effect = subprocess.CalledProcessError( + 1, "modprobe" + ) + + probe_governor_module("invalid_governor") + + mock_logging_error.assert_called_with( + "%s governor not supported", "invalid_governor" + ) + # Ensure sys.exit was called with 1 + mock_sys_exit.assert_called_with(1) + + +class TestCPUSStress(unittest.TestCase): + @patch("cpufreq_governors.subprocess.Popen") + @patch("cpufreq_governors.cpu_count") + def test_stress_cpus(self, mock_cpu_count, mock_popen): + mock_cpu_count.return_value = 4 # Simulating 4 CPU cores + mock_popen_instance = MagicMock() + mock_popen.return_value = ( + mock_popen_instance # Mocking the Popen object + ) + + 
stress_cpus() + + # Assert that the Popen was called 4 times + self.assertEqual(mock_popen.call_count, 4) + # Check if the Popen was called with the correct command + mock_popen.assert_called_with(["dd", "if=/dev/zero", "of=/dev/null"]) + + @patch("cpufreq_governors.subprocess.Popen") + def test_stop_stress_cpus(self, mock_popen): + # Mocking a list of mock Popen objects + mock_processes = [ + MagicMock() for _ in range(4) + ] # Simulating 4 CPU cores + + stop_stress_cpus(mock_processes) + + for mock_process in mock_processes: + self.assertEqual(mock_process.terminate.call_count, 1) + self.assertEqual(mock_process.wait.call_count, 1) + + @patch("cpufreq_governors.stress_cpus") + def test_context_stress_cpus(self, mock_stress_cpus): + # Mocking the return value of stress_cpus + mock_stress_cpus.return_value = [ + MagicMock() for _ in range(4) + ] # Simulating 4 CPU cores + + # Using the context manager for context_stress_cpus + with context_stress_cpus(): + pass + + self.assertEqual(mock_stress_cpus.call_count, 1) + + +class TestCPUScalingHandler(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.original_stdout = sys.stdout + suppress_text = StringIO() + sys.stdout = suppress_text + logging.disable(logging.CRITICAL) + + def setUp(self): + self.cpu_scaling_info = CPUScalingHandler() self.cpu_scaling_info.sys_cpu_dir = "/sys/devices/system/cpu" - @mock.patch('os.listdir') + @classmethod + def tearDownClass(cls): + sys.stdout = cls.original_stdout + logging.disable(logging.NOTSET) + + @patch("os.listdir") def test_get_cpu_policies_success(self, mock_listdir): - # Mock the listdir function to return a list of CPU policies + """Check if a sorted list contains cpu policy number can be returned + while policies exist. 
+ """ mock_listdir.return_value = ["policy0", "policy1", "policy2"] - # Call the get_cpu_policies function + policies = self.cpu_scaling_info.get_cpu_policies() - # Assert that the function returns the expected list of policies self.assertEqual(policies, [0, 1, 2]) - @mock.patch('os.listdir') + @patch("os.listdir") def test_get_cpu_policies_failure(self, mock_listdir): + """Check if an empty list be returned while OSError""" # Mock the listdir function to raise an OSError mock_listdir.side_effect = OSError("OSError") + result = self.cpu_scaling_info.get_cpu_policies() + self.assertEqual(result, []) - @mock.patch('os.listdir') + @patch("os.listdir") def test_get_cpu_policies_failure_empty(self, mock_listdir): + """Check if an empty list be returned while no policy exists""" # Mock the listdir function to return an empty list mock_listdir.return_value = [] + result = self.cpu_scaling_info.get_cpu_policies() + self.assertEqual(result, []) - @mock.patch('builtins.open', mock.mock_open(read_data='Driver')) + @patch("builtins.open", mock_open(read_data="Driver")) def test_get_scaling_driver_success(self): + """Check if the name of driver be returned""" # Mock the open function to return a scaling driver result = self.cpu_scaling_info.get_scaling_driver() + self.assertEqual(result, "Driver") - @mock.patch('builtins.open', side_effect=OSError) + @patch("builtins.open") def test_get_scaling_driver_oserror(self, mock_open): + """Check if an empty string be returned while OSError""" # Mock the open function to raise an OSError + mock_open.side_effect = OSError("OSError") + result = self.cpu_scaling_info.get_scaling_driver() + self.assertEqual(result, "") - @mock.patch('builtins.open', mock.mock_open(read_data='Attribute_Value')) + @patch( + "cpufreq_governors.CPUScalingHandler.get_scaling_driver", + return_value="some_driver_name", + ) + @patch("builtins.print") # Mock the built-in print function + def test_print_policies_list_success( + self, mock_print, 
mock_get_scaling_driver + ): + scaling_info = CPUScalingHandler(policy=0) + scaling_info.cpu_policies = [0, 1] + + result = scaling_info.print_policies_list() + + mock_get_scaling_driver.assert_any_call(0) + mock_get_scaling_driver.assert_any_call(1) + # Ensure that the method returns True when successful + self.assertTrue(result) + + @patch( + "cpufreq_governors.CPUScalingHandler.get_scaling_driver", + return_value="", + ) + @patch("builtins.print") # Mock the built-in print function + def test_print_policies_list_failure( + self, mock_print, mock_get_scaling_driver + ): + scaling_info = CPUScalingHandler(policy=0) + + with patch.object(scaling_info, "cpu_policies", False): + result = scaling_info.print_policies_list() + self.assertFalse(result) + + @patch("builtins.open", mock_open(read_data="Attribute_Value")) def test_get_attribute_success(self): + """Check if get_attribute gets the contain of specific node""" # Mock the open function to return a attribute value result = self.cpu_scaling_info.get_attribute("Attribute") + self.assertEqual(result, "Attribute_Value") - @mock.patch('builtins.open', side_effect=OSError) + @patch("builtins.open", side_effect=OSError) def test_get_attribute_oserror(self, mock_open): + """Check if get_attribute gets an empty string while OSError occurs""" # Mock the open function to raise an OSError result = self.cpu_scaling_info.get_attribute("Attribute") + self.assertEqual(result, "") - @mock.patch('builtins.open', new_callable=mock.mock_open, create=True) + @patch("builtins.open", new_callable=mock_open, create=True) def test_set_attribute_success(self, mock_open): + """Check if returns True while setting a value to a specific node""" mock_file = mock_open.return_value result = self.cpu_scaling_info.set_attribute( - 'attribute_name', - 'new_value') - mock_file.write.assert_called_once_with('new_value') + "attribute_name", "new_value" + ) + + mock_file.write.assert_called_once_with("new_value") self.assertTrue(result) - 
@mock.patch('builtins.open', side_effect=PermissionError) + @patch("builtins.open", side_effect=PermissionError) def test_set_attribute_permissionerror(self, mock_open): - # Mock the open function to raise an OSError + """Check if returns False while PermissionError occurs""" + # Mock the open function to raise an PermissionError result = self.cpu_scaling_info.set_attribute( - 'attribute_name', - 'new_value') + "attribute_name", "new_value" + ) + self.assertFalse(result) - @mock.patch('builtins.open', side_effect=OSError) + @patch("builtins.open", side_effect=OSError) def test_set_attribute_oserror(self, mock_open): + """Check if returns False while OSError occurs""" # Mock the open function to raise an OSError result = self.cpu_scaling_info.set_attribute( - 'attribute_name', - 'new_value') + "attribute_name", "new_value" + ) + self.assertFalse(result) - def tearDown(self): - # release stdout - sys.stdout = sys.__stdout__ + @patch( + "cpufreq_governors.CPUScalingHandler.set_attribute", + return_value=True, + ) + def test_set_policy_attribute_success(self, mock_set_attribute): + result = self.cpu_scaling_info.set_policy_attribute( + "some_attr", "some_value" + ) + + self.assertTrue(result) + + @patch( + "cpufreq_governors.CPUScalingHandler.get_policy_attribute", + return_value="1000000", + ) + def test_get_min_frequency_success(self, mock_get_policy_attribute): + result = self.cpu_scaling_info.get_min_frequency() + + mock_get_policy_attribute.assert_called_once_with("scaling_min_freq") + + self.assertEqual(result, 1000000) + + @patch( + "cpufreq_governors.CPUScalingHandler.get_policy_attribute", + return_value=None, + ) + def test_get_min_frequency_with_no_frequency( + self, mock_get_policy_attribute + ): + result = self.cpu_scaling_info.get_min_frequency() + + mock_get_policy_attribute.assert_called_once_with("scaling_min_freq") + + # Expected frequency is 0 when no value is returned + self.assertEqual(result, 0) + + @patch( + 
"cpufreq_governors.CPUScalingHandler.get_policy_attribute", + return_value="1000000", + ) + def test_get_max_frequency_success(self, mock_get_policy_attribute): + result = self.cpu_scaling_info.get_max_frequency() + + mock_get_policy_attribute.assert_called_once_with("scaling_max_freq") + + self.assertEqual(result, 1000000) + + @patch( + "cpufreq_governors.CPUScalingHandler.get_policy_attribute", + return_value=None, + ) + def test_get_max_frequency_with_no_frequency( + self, mock_get_policy_attribute + ): + result = self.cpu_scaling_info.get_max_frequency() + + mock_get_policy_attribute.assert_called_once_with("scaling_max_freq") + + # Expected frequency is 0 when no value is returned + self.assertEqual(result, 0) + + @patch( + "cpufreq_governors.CPUScalingHandler.get_policy_attribute", + return_value="1000000", + ) + def test_get_current_frequency_success(self, mock_get_policy_attribute): + result = self.cpu_scaling_info.get_current_frequency() + + mock_get_policy_attribute.assert_called_once_with("scaling_cur_freq") + + self.assertEqual(result, 1000000) + + @patch( + "cpufreq_governors.CPUScalingHandler.get_policy_attribute", + return_value=None, + ) + def test_get_current_frequency_with_no_frequency( + self, mock_get_policy_attribute + ): + result = self.cpu_scaling_info.get_current_frequency() + + mock_get_policy_attribute.assert_called_once_with("scaling_cur_freq") + + self.assertEqual(result, 0) + + @patch( + "cpufreq_governors.CPUScalingHandler.get_policy_attribute", + return_value="0 1 2 3", + ) + def test_get_affected_cpus_with_spaces_success( + self, mock_get_policy_attribute + ): + result = self.cpu_scaling_info.get_affected_cpus() + + mock_get_policy_attribute.assert_called_once_with("affected_cpus") + + self.assertEqual(result, ["0", "1", "2", "3"]) + + @patch( + "cpufreq_governors.CPUScalingHandler.get_policy_attribute", + return_value="", + ) + def test_get_affected_cpus_with_no_value(self, mock_get_policy_attribute): + result = 
self.cpu_scaling_info.get_affected_cpus() + + mock_get_policy_attribute.assert_called_once_with("affected_cpus") + + self.assertEqual(result, []) + + @patch( + "cpufreq_governors.CPUScalingHandler.set_policy_attribute", + return_value=True, + ) + def test_set_governor_success(self, mock_set_policy_attribute): + result = self.cpu_scaling_info.set_governor("ondemand") + + mock_set_policy_attribute.assert_called_once_with( + "scaling_governor", "ondemand" + ) + + self.assertTrue(result) + + @patch( + "cpufreq_governors.CPUScalingHandler.set_policy_attribute", + return_value=False, + ) + def test_set_governor_failure(self, mock_set_policy_attribute): + result = self.cpu_scaling_info.set_governor("performance") + + mock_set_policy_attribute.assert_called_once_with( + "scaling_governor", "performance" + ) + + self.assertFalse(result) + + @patch( + "cpufreq_governors.CPUScalingHandler.set_policy_attribute", + return_value=True, + ) + def test_context_set_governor_success(self, mock_set_policy_attribute): + # Using the context manager + with self.cpu_scaling_info.context_set_governor("ondemand"): + mock_set_policy_attribute.assert_called_once_with( + "scaling_governor", "ondemand" + ) + + @patch( + "cpufreq_governors.CPUScalingHandler.set_policy_attribute", + return_value=False, + ) + def test_context_set_governor_failure(self, mock_set_policy_attribute): + # Using the context manager with an expected failure + try: + with self.cpu_scaling_info.context_set_governor("performance"): + mock_set_policy_attribute.assert_called_once_with( + "scaling_governor", "performance" + ) + except SystemExit: + # Exception caught as expected + pass + else: + self.fail("Expected SystemExit") + + @patch( + "cpufreq_governors.CPUScalingHandler.set_frequency", return_value=True + ) + def test_context_set_frequency_success(self, mock_set_frequency): + # Using the context manager + with self.cpu_scaling_info.context_set_frequency("1200000"): + 
mock_set_frequency.assert_called_once_with("1200000") + + @patch( + "cpufreq_governors.CPUScalingHandler.set_frequency", return_value=False + ) + def test_context_set_frequency_failure(self, mock_set_frequency): + # Using the context manager with an expected failure + try: + with self.cpu_scaling_info.context_set_frequency("1200000"): + mock_set_frequency.assert_called_once_with("1200000") + except SystemExit: + # Exception caught as expected + pass + else: + self.fail("Expected SystemExit") + + @patch( + "cpufreq_governors.CPUScalingHandler.set_policy_attribute", + return_value=True, + ) + def test_set_frequency_success(self, mock_set_policy_attribute): + result = self.cpu_scaling_info.set_frequency("1200000") + + mock_set_policy_attribute.assert_called_once_with( + "scaling_setspeed", "1200000" + ) + + self.assertTrue(result) + + @patch( + "cpufreq_governors.CPUScalingHandler.set_policy_attribute", + return_value=False, + ) + def test_set_frequency_failure(self, mock_set_policy_attribute): + result = self.cpu_scaling_info.set_frequency("1200000") + + mock_set_policy_attribute.assert_called_once_with( + "scaling_setspeed", "1200000" + ) + + self.assertFalse(result) + + +class TestCPUScalingTest(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.original_stdout = sys.stdout + suppress_text = StringIO() + sys.stdout = suppress_text + logging.disable(logging.CRITICAL) + + @classmethod + def tearDownClass(cls): + sys.stdout = cls.original_stdout logging.disable(logging.NOTSET) + @patch("cpufreq_governors.CPUScalingHandler") + def test_print_policy_info(self, mock_cpuscalinghandler): + mock_handler_instance = Mock() + mock_handler_instance.affected_cpus = [0, 1, 2] + mock_handler_instance.min_freq = 1000000 # 1000 kHz + mock_handler_instance.max_freq = 3000000 # 3000 kHz + mock_handler_instance.governors = ["governor1", "governor2"] + mock_handler_instance.original_governor = "original_governor_value" + + mock_cpuscalinghandler.return_value = 
mock_handler_instance + + cpu_scaling_test = CPUScalingTest(policy=0) + expected_logs = [ + "INFO:root:## CPUfreq Policy0 Info ##", + "INFO:root:Affected CPUs:", + "INFO:root: cpu0", + "INFO:root: cpu1", + "INFO:root: cpu2", + "INFO:root:Supported CPU Frequencies: 1000.0 - 3000.0 MHz", + "INFO:root:Supported Governors:", + "INFO:root: governor1", + "INFO:root: governor2", + "INFO:root:Current Governor: original_governor_value", + ] + with self.assertLogs(level="INFO") as lc: + logging.disable(logging.NOTSET) + cpu_scaling_test.print_policy_info() + for i in range(len(expected_logs)): + self.assertEqual(expected_logs[i], lc.output[i]) + logging.disable(logging.CRITICAL) + + @patch("cpufreq_governors.CPUScalingHandler") + def test_print_policy_info_no_governor(self, mock_cpuscalinghandler): + mock_handler_instance = Mock() + mock_handler_instance.affected_cpus = [0, 1, 2] + mock_handler_instance.min_freq = 1000000 # 1000 kHz + mock_handler_instance.max_freq = 3000000 # 3000 kHz + mock_handler_instance.governors = [] + mock_handler_instance.original_governor = "original_governor_value" + + mock_cpuscalinghandler.return_value = mock_handler_instance + + cpu_scaling_test = CPUScalingTest(policy=0) + expected_logs = [ + "INFO:root:## CPUfreq Policy0 Info ##", + "INFO:root:Affected CPUs:", + "INFO:root: None", + "INFO:root:Supported CPU Frequencies: 1000.0 - 3000.0 MHz", + "INFO:root:Supported Governors:", + "INFO:root: None", + "INFO:root:Current Governor: original_governor_value", + ] + with self.assertLogs(level="INFO") as lc: + logging.disable(logging.NOTSET) + cpu_scaling_test.print_policy_info() + for i in range(len(expected_logs)): + self.assertEqual(expected_logs[i], lc.output[i]) + logging.disable(logging.CRITICAL) + + @patch("cpufreq_governors.CPUScalingHandler") + def test_driver_detect_empty_policies(self, mock_cpuscalinghandler): + mock_handler_instance = Mock() + mock_handler_instance.cpu_policies = [] + mock_handler_instance.get_scaling_driver.return_value 
= "driver_a" + mock_cpuscalinghandler.return_value = mock_handler_instance + + instance = CPUScalingTest(policy=0) + result = instance.test_driver_detect() + + self.assertFalse(result) + + @patch("cpufreq_governors.CPUScalingHandler") + def test_driver_detect_single_driver(self, mock_cpuscalinghandler): + mock_handler_instance = Mock() + mock_handler_instance.cpu_policies = [1, 2] + mock_handler_instance.get_scaling_driver.return_value = "driver_a" + mock_cpuscalinghandler.return_value = mock_handler_instance + + instance = CPUScalingTest(policy=0) + result = instance.test_driver_detect() + + self.assertTrue(result) + + @patch("cpufreq_governors.CPUScalingHandler") + def test_driver_detect_multiple_drivers(self, mock_cpuscalinghandler): + mock_handler_instance = Mock() + mock_handler_instance.cpu_policies = [1, 2] + mock_handler_instance.get_scaling_driver.side_effect = [ + "driver_a", + "driver_b", + ] + mock_cpuscalinghandler.return_value = mock_handler_instance + + instance = CPUScalingTest(policy=0) + result = instance.test_driver_detect() + + self.assertTrue(result) + + @patch("cpufreq_governors.CPUScalingHandler") + def test_driver_detect_no_drivers_found(self, mock_cpuscalinghandler): + mock_handler_instance = Mock() + mock_handler_instance.cpu_policies = [1, 2] + mock_handler_instance.get_scaling_driver.return_value = [] + mock_cpuscalinghandler.return_value = mock_handler_instance + + instance = CPUScalingTest(policy=0) + + result = instance.test_driver_detect() + self.assertFalse(result) + + @patch("cpufreq_governors.time") + def test_is_frequency_equal_to_target_success(self, mock_time): + # Mocking the current time and the get_current_frequency method + # This simulates time-based checks + mock_time.time.side_effect = [0, 0.5, 1] + instance = CPUScalingTest(policy=0) + + mock_handler = Mock() + mock_handler.get_current_frequency.return_value = 1000 + instance.handler = mock_handler + + # Set the target frequency to 1000 for this test + target_freq = 
1000 + + result = instance.is_frequency_equal_to_target(target_freq) + self.assertTrue(result) + + @patch("cpufreq_governors.time") + def test_is_frequency_equal_to_target_timeout(self, mock_time): + # Mocking the current time and the get_current_frequency method + # This simulates time-based checks + mock_time.time.side_effect = [0, 0.5, 11] + instance = CPUScalingTest(policy=0) + + mock_handler = Mock() + mock_handler.get_current_frequency.return_value = 900 + instance.handler = mock_handler + + target_freq = 1000 + + result = instance.is_frequency_equal_to_target(target_freq) + self.assertFalse(result) + + @patch("cpufreq_governors.time") + def test_is_frequency_settled_down_success(self, mock_time): + # Mocking the current time and the get_current_frequency method + # This simulates time-based checks + mock_time.time.side_effect = [0, 0.5, 1] + instance = CPUScalingTest(policy=0) + + mock_handler = Mock() + mock_handler.get_current_frequency.return_value = 900 + mock_handler.max_freq = 1000 + instance.handler = mock_handler + + result = instance.is_frequency_settled_down() + self.assertTrue(result) + + @patch("cpufreq_governors.time") + def test_is_frequency_settled_down_failure(self, mock_time): + # Mocking the current time and the get_current_frequency method + # This simulates time-based checks + mock_time.time.side_effect = [0, 0.5, 11] + instance = CPUScalingTest(policy=0) + + # Mocking the get_current_frequency method to return a value + # greater than or equal to max_freq + mock_handler = Mock() + mock_handler.get_current_frequency.return_value = 1100 + mock_handler.max_freq = 1000 + instance.handler = mock_handler + + result = instance.is_frequency_settled_down() + self.assertFalse(result) + + @patch("cpufreq_governors.CPUScalingTest.is_frequency_settled_down") + @patch("cpufreq_governors.CPUScalingTest.is_frequency_equal_to_target") + @patch("cpufreq_governors.context_stress_cpus") + @patch("cpufreq_governors.CPUScalingHandler.context_set_governor") + 
def test_frequency_influence_ondemand_success( + self, + mock_context_set_governor, + mock_context_stress_cpus, + mock_is_frequency_equal_to_target, + mock_is_frequency_settled_down, + ): + mock_is_frequency_equal_to_target.return_value = True + mock_is_frequency_settled_down.return_value = True + + instance = CPUScalingTest(policy=0) + result = instance.test_frequency_influence(governor="ondemand") + + self.assertTrue(result) + + @patch("cpufreq_governors.CPUScalingTest.is_frequency_settled_down") + @patch("cpufreq_governors.CPUScalingTest.is_frequency_equal_to_target") + @patch("cpufreq_governors.context_stress_cpus") + @patch("cpufreq_governors.CPUScalingHandler.context_set_governor") + def test_frequency_influence_ondemand_frequency_not_equal( + self, + mock_context_set_governor, + mock_context_stress_cpus, + mock_is_frequency_equal_to_target, + mock_is_frequency_settled_down, + ): + mock_is_frequency_equal_to_target.return_value = False + mock_is_frequency_settled_down.return_value = True + + instance = CPUScalingTest(policy=0) + result = instance.test_frequency_influence(governor="ondemand") + + self.assertFalse(result) + + @patch("cpufreq_governors.CPUScalingTest.is_frequency_settled_down") + @patch("cpufreq_governors.CPUScalingTest.is_frequency_equal_to_target") + @patch("cpufreq_governors.context_stress_cpus") + @patch("cpufreq_governors.CPUScalingHandler.context_set_governor") + def test_frequency_influence_ondemand_settled_down_failure( + self, + mock_context_set_governor, + mock_context_stress_cpus, + mock_is_frequency_equal_to_target, + mock_is_frequency_settled_down, + ): + mock_is_frequency_equal_to_target.return_value = True + mock_is_frequency_settled_down.return_value = False + + instance = CPUScalingTest(policy=0) + result = instance.test_frequency_influence(governor="ondemand") + + self.assertFalse(result) + + @patch("cpufreq_governors.CPUScalingTest.is_frequency_equal_to_target") + 
@patch("cpufreq_governors.CPUScalingHandler.context_set_frequency") + @patch("cpufreq_governors.CPUScalingHandler.context_set_governor") + def test_frequency_influence_userspace_success( + self, + mock_context_set_governor, + mock_context_set_frequency, + mock_is_frequency_equal_to_target, + ): + mock_is_frequency_equal_to_target.return_value = True + + instance = CPUScalingTest(policy=0) + result = instance.test_frequency_influence( + governor="userspace", target_freq=1000 + ) + + self.assertTrue(result) + + @patch("cpufreq_governors.CPUScalingTest.is_frequency_equal_to_target") + @patch("cpufreq_governors.CPUScalingHandler.context_set_frequency") + @patch("cpufreq_governors.CPUScalingHandler.context_set_governor") + def test_frequency_influence_userspace_failure( + self, + mock_context_set_governor, + mock_context_set_frequency, + mock_is_frequency_equal_to_target, + ): + mock_is_frequency_equal_to_target.return_value = False + + instance = CPUScalingTest(policy=0) + result = instance.test_frequency_influence( + governor="userspace", target_freq=1000 + ) + + self.assertFalse(result) + + @patch("cpufreq_governors.CPUScalingTest.is_frequency_equal_to_target") + @patch("cpufreq_governors.CPUScalingHandler.context_set_governor") + def test_frequency_influence_performance_success( + self, + mock_context_set_governor, + mock_is_frequency_equal_to_target, + ): + mock_is_frequency_equal_to_target.return_value = True + + instance = CPUScalingTest(policy=0) + result = instance.test_frequency_influence( + governor="performance", target_freq=1000 + ) + + self.assertTrue(result) + + @patch("cpufreq_governors.CPUScalingTest.is_frequency_equal_to_target") + @patch("cpufreq_governors.CPUScalingHandler.context_set_governor") + def test_frequency_influence_performance_failure( + self, + mock_context_set_governor, + mock_is_frequency_equal_to_target, + ): + mock_is_frequency_equal_to_target.return_value = False + + instance = CPUScalingTest(policy=0) + result = 
instance.test_frequency_influence( + governor="performance", target_freq=1000 + ) + + self.assertFalse(result) + + @patch("sys.exit") + @patch("cpufreq_governors.CPUScalingHandler.context_set_governor") + def test_frequency_influence_invalid_governor( + self, + mock_context_set_governor, + mock_exit, + ): + instance = CPUScalingTest(policy=0) + instance.test_frequency_influence(governor="no_governor") + + mock_exit.assert_called_with("Governor 'no_governor' not supported") + + @patch("cpufreq_governors.CPUScalingTest.test_frequency_influence") + def test_test_userspace_success(self, mock_test_frequency_influence): + mock_test_frequency_influence.side_effect = [True, True] + + instance = CPUScalingTest(policy=0) + result = instance.test_userspace() + + self.assertTrue(result) + + @patch("cpufreq_governors.CPUScalingTest.test_frequency_influence") + def test_test_userspace_failure(self, mock_test_frequency_influence): + mock_test_frequency_influence.side_effect = [False, True] + + instance = CPUScalingTest(policy=0) + result = instance.test_userspace() + + self.assertFalse(result) + + @patch("cpufreq_governors.CPUScalingTest.test_frequency_influence") + def test_test_performance_success(self, mock_test_frequency_influence): + mock_test_frequency_influence.return_value = True + + instance = CPUScalingTest(policy=0) + result = instance.test_performance() + + self.assertTrue(result) + + @patch("cpufreq_governors.CPUScalingTest.test_frequency_influence") + def test_test_performance_failure(self, mock_test_frequency_influence): + mock_test_frequency_influence.return_value = False + + instance = CPUScalingTest(policy=0) + result = instance.test_performance() + + self.assertFalse(result) + + @patch("cpufreq_governors.CPUScalingTest.test_frequency_influence") + def test_test_powersave_success(self, mock_test_frequency_influence): + mock_test_frequency_influence.return_value = True + + instance = CPUScalingTest(policy=0) + result = instance.test_powersave() + + 
self.assertTrue(result) + + @patch("cpufreq_governors.CPUScalingTest.test_frequency_influence") + def test_test_powersave_failure(self, mock_test_frequency_influence): + mock_test_frequency_influence.return_value = False + + instance = CPUScalingTest(policy=0) + result = instance.test_powersave() + + self.assertFalse(result) + + @patch("cpufreq_governors.CPUScalingTest.test_frequency_influence") + def test_test_ondemand_success(self, mock_test_frequency_influence): + mock_test_frequency_influence.return_value = True + + instance = CPUScalingTest(policy=0) + result = instance.test_ondemand() + + self.assertTrue(result) + + @patch("cpufreq_governors.CPUScalingTest.test_frequency_influence") + def test_test_ondemand_failure(self, mock_test_frequency_influence): + mock_test_frequency_influence.return_value = False + + instance = CPUScalingTest(policy=0) + result = instance.test_ondemand() + + self.assertFalse(result) + + @patch("cpufreq_governors.CPUScalingTest.test_frequency_influence") + def test_test_conservative_success(self, mock_test_frequency_influence): + mock_test_frequency_influence.return_value = True + + instance = CPUScalingTest(policy=0) + result = instance.test_conservative() + + self.assertTrue(result) + + @patch("cpufreq_governors.CPUScalingTest.test_frequency_influence") + def test_test_conservative_failure(self, mock_test_frequency_influence): + mock_test_frequency_influence.return_value = False + + instance = CPUScalingTest(policy=0) + result = instance.test_conservative() + + self.assertFalse(result) + + @patch("cpufreq_governors.CPUScalingTest.test_frequency_influence") + def test_test_schedutil_success(self, mock_test_frequency_influence): + mock_test_frequency_influence.return_value = True + + instance = CPUScalingTest(policy=0) + result = instance.test_schedutil() + + self.assertTrue(result) + + @patch("cpufreq_governors.CPUScalingTest.test_frequency_influence") + def test_test_schedutil_failure(self, mock_test_frequency_influence): + 
mock_test_frequency_influence.return_value = False + + instance = CPUScalingTest(policy=0) + result = instance.test_schedutil() + + self.assertFalse(result) + + +class TestMainFunction(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.original_stdout = sys.stdout + suppress_text = StringIO() + sys.stdout = suppress_text + logging.disable(logging.CRITICAL) + + @classmethod + def tearDownClass(cls): + sys.stdout = cls.original_stdout + logging.disable(logging.NOTSET) + + @patch("sys.exit") + def test_debug_logging_enabled(self, mock_exit): + with patch("sys.argv", ["program_name", "--debug"]): + logger = init_logger() + main() + self.assertEqual(logger.level, logging.DEBUG) + + @patch("sys.exit") + def test_debug_logging_disabled(self, mock_exit): + with patch("sys.argv", ["program_name"]): + logger = init_logger() + main() + self.assertEqual(logger.level, logging.INFO) + + @patch("sys.exit") + @patch("cpufreq_governors.CPUScalingHandler") + def test_policy_resource_flag_enabled(self, mock_handler, mock_exit): + with patch("sys.argv", ["program_name", "--policy-resource"]): + main() + mock_handler.return_value.print_policies_list.\ + assert_called_once_with() + + @patch("sys.exit") + @patch("cpufreq_governors.CPUScalingHandler") + def test_policy_resource_flag_disabled(self, mock_handler, mock_exit): + with patch("sys.argv", ["program_name"]): + main() + mock_handler.return_value.print_policies_list.assert_not_called() + + @patch("sys.exit") + @patch("cpufreq_governors.CPUScalingTest") + def test_driver_detect_flag_enabled(self, mock_test, mock_exit): + with patch("sys.argv", ["program_name", "--driver-detect"]): + main() + mock_test.assert_called_once_with(policy=0) + mock_test.return_value.test_driver_detect.assert_called_once_with() + + @patch("sys.exit") + @patch("cpufreq_governors.CPUScalingTest") + def test_driver_detect_flag_disabled(self, mock_test, mock_exit): + with patch("sys.argv", ["program_name"]): + main() + 
mock_test.return_value.test_driver_detect.assert_not_called() + + @patch("sys.exit") + @patch("cpufreq_governors.CPUScalingTest") + @patch("cpufreq_governors.probe_governor_module") + @patch("cpufreq_governors.CPUScalingHandler") + def test_valid_governor_has_not_probe( + self, mock_handler, mock_probe_governor, mock_test, mock_exit + ): + mock_test_instance = Mock() + mock_test.return_value = mock_test_instance + mock_handler.governors = ["ondemand"] + + with patch( + "sys.argv", ["program_name", "--governor", "valid_governor1"] + ): + main() + mock_test.assert_called_once_with(policy=0) + mock_test_instance.print_policy_info.assert_called_once_with() + mock_probe_governor.assert_called_once_with("valid_governor1") + mock_exit.assert_not_called() + + @patch("sys.exit") + @patch("cpufreq_governors.CPUScalingTest") + @patch("cpufreq_governors.probe_governor_module") + @patch("cpufreq_governors.CPUScalingHandler") + def test_valid_governor_already_probed( + self, mock_handler, mock_probe_governor, mock_test, mock_exit + ): + mock_test_instance = Mock() + mock_test.return_value = mock_test_instance + mock_handler_instance = Mock() + mock_handler_instance.governors = ["ondemand"] + mock_handler.return_value = mock_handler_instance + + with patch("sys.argv", ["program_name", "--governor", "ondemand"]): + main() + mock_test.assert_called_once_with(policy=0) + mock_test_instance.print_policy_info.assert_called_once_with() + mock_probe_governor.assert_not_called() + mock_exit.assert_not_called() + + @patch("sys.exit") + @patch("cpufreq_governors.getattr") + @patch("cpufreq_governors.CPUScalingTest") + @patch("cpufreq_governors.probe_governor_module") + @patch("cpufreq_governors.CPUScalingHandler") + def test_given_governor_not_supported( + self, + mock_handler, + mock_probe_governor, + mock_test, + mock_getattr, + mock_exit, + ): + mock_test_instance = Mock() + mock_test.return_value = mock_test_instance + mock_handler_instance = Mock() + mock_handler_instance.governors = 
["ondemand"] + mock_handler.return_value = mock_handler_instance + mock_getattr.side_effect = AttributeError("AttributeError message") + + with patch("sys.argv", ["program_name", "--governor", "not_support"]): + main() + mock_test.assert_called_once_with(policy=0) + mock_test_instance.print_policy_info.assert_called_once_with() + mock_probe_governor.assert_called_once_with("not_support") + mock_exit.assert_called_once_with(1) + + @patch("sys.exit") + @patch("cpufreq_governors.getattr") + @patch("cpufreq_governors.CPUScalingTest") + @patch("cpufreq_governors.probe_governor_module") + @patch("cpufreq_governors.CPUScalingHandler") + def test_getattr_return_false( + self, + mock_handler, + mock_probe_governor, + mock_test, + mock_getattr, + mock_exit, + ): + mock_test_instance = Mock() + mock_test.return_value = mock_test_instance + mock_handler_instance = Mock() + mock_handler_instance.governors = ["ondemand"] + mock_handler.return_value = mock_handler_instance + mock_callable = MagicMock(return_value=False) + mock_getattr.return_value = mock_callable + + with patch("sys.argv", ["program_name", "--governor", "not_support"]): + main() + mock_test.assert_called_once_with(policy=0) + mock_test_instance.print_policy_info.assert_called_once_with() + mock_probe_governor.assert_called_once_with("not_support") + mock_exit.assert_called_once_with(1) + -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() From 7dd40a1742177df8447faf2625465246be0c5b68 Mon Sep 17 00:00:00 2001 From: stanley31huang Date: Fri, 1 Mar 2024 21:03:43 +0800 Subject: [PATCH 057/108] Strip color code in unity_support_test results (BugFix) (#1031) strip color code in unity_support_test results fixed checkbox issue 629, strip the color code from the response by unity_support_test. 
--- providers/base/units/graphics/jobs.pxu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/providers/base/units/graphics/jobs.pxu b/providers/base/units/graphics/jobs.pxu index 827ebba277..59745f45d6 100644 --- a/providers/base/units/graphics/jobs.pxu +++ b/providers/base/units/graphics/jobs.pxu @@ -265,7 +265,7 @@ category_id: com.canonical.plainbox::graphics id: graphics/{index}_gl_support_{product_slug} flags: also-after-suspend command: - "$CHECKBOX_RUNTIME"/usr/lib/nux/unity_support_test -p 2>&1 + "$CHECKBOX_RUNTIME"/usr/lib/nux/unity_support_test -p 2>&1 | sed -e "s/\x1b\[[0-9;]*m//g" estimated_duration: 0.131 _description: Check that {vendor} {product} hardware is able to run a desktop session (OpenGL) _summary: Test OpenGL support for {vendor} {product} From f00961ccbd1a9436d530cb38ad6878544840496d Mon Sep 17 00:00:00 2001 From: Pierre Equoy Date: Fri, 1 Mar 2024 21:54:10 +0800 Subject: [PATCH 058/108] Fix errors in GitHub workflow yaml files (infra) (#1030) Fix errors in GitHub workflow yaml files --- .github/workflows/tox-tools-release.yaml | 2 +- .github/workflows/validate_workflows.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tox-tools-release.yaml b/.github/workflows/tox-tools-release.yaml index 5b540d4820..b4204ef7eb 100644 --- a/.github/workflows/tox-tools-release.yaml +++ b/.github/workflows/tox-tools-release.yaml @@ -13,7 +13,7 @@ on: jobs: tox_test_release_tools: - name: Test release tools with tox + name: Test release tools with tox defaults: run: working-directory: tools/release diff --git a/.github/workflows/validate_workflows.yaml b/.github/workflows/validate_workflows.yaml index b6708001a5..82a9a69554 100644 --- a/.github/workflows/validate_workflows.yaml +++ b/.github/workflows/validate_workflows.yaml @@ -7,7 +7,7 @@ on: jobs: workflow_validation: - name: Workflow validation + name: Workflow validation runs-on: ubuntu-latest steps: - name: Checkout checkbox monorepo From 
fc2743a0ceb5823a4f32a02ee880892b93eef98d Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Fri, 1 Mar 2024 17:13:35 +0100 Subject: [PATCH 059/108] Use pylxd mount disk (infra) (#1032) * Use pylxd api to mount disks * Apply suggestion from the review --- metabox/metabox/core/lxd_provider.py | 40 +++++++--------------------- 1 file changed, 9 insertions(+), 31 deletions(-) diff --git a/metabox/metabox/core/lxd_provider.py b/metabox/metabox/core/lxd_provider.py index a3bdd390e8..3b062abc4c 100644 --- a/metabox/metabox/core/lxd_provider.py +++ b/metabox/metabox/core/lxd_provider.py @@ -285,40 +285,18 @@ def _transfer_file_preserve_mode(self, machine, src, dest): def _mount_source(self, machine, path): logger.debug("Mounting dir {}", path) - output = subprocess.check_output( - [ - "lxc", - "config", - "device", - "add", - machine._container.name, - self.LXD_MOUNT_DEVICE, - "disk", - "source={}".format(path), - "path={}".format(self.LXD_SOURCE_MOUNT_POINT), - ], - stderr=subprocess.PIPE, - text=True, - ).strip() - if output: - logger.debug(output) + disk_config = { + "source": path, + "path": self.LXD_SOURCE_MOUNT_POINT, + "type": "disk", + } + machine._container.devices.update({self.LXD_MOUNT_DEVICE: disk_config}) + machine._container.save(wait=True) def _unmount_source(self, machine): logger.debug("Unmounting dir...") - output = subprocess.check_output( - [ - "lxc", - "config", - "device", - "remove", - machine._container.name, - self.LXD_MOUNT_DEVICE, - ], - stderr=subprocess.PIPE, - text=True, - ).strip() - if output: - logger.debug(output) + del machine._container.devices[self.LXD_MOUNT_DEVICE] + machine._container.save(wait=True) @contextmanager def _mounted_source(self, machine, path): From d5c2b4a6eb440c4c9d321be3a4afaf9a3db848c3 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Tue, 5 Mar 2024 08:58:45 +0100 Subject: [PATCH 060/108] Cast back exception to local type (bugfix) (#1033) Cast back exception to local type --- 
checkbox-ng/checkbox_ng/launcher/controller.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/checkbox-ng/checkbox_ng/launcher/controller.py b/checkbox-ng/checkbox_ng/launcher/controller.py index c08008b07c..bcb4eadf45 100644 --- a/checkbox-ng/checkbox_ng/launcher/controller.py +++ b/checkbox-ng/checkbox_ng/launcher/controller.py @@ -376,6 +376,13 @@ def _resumed_session(self, session_id): """ try: yield self.sa.resume_session(session_id) + except rpyc.core.vinegar.GenericException as e: + # cast back the (custom) remote exception for IncompatibleJobError + # (that is of type GenericException due to rpyc) + # so that it can be treated as a normal "local" exception" + if "plainbox.impl.session.resume.IncompatibleJobError" in str(e): + raise IncompatibleJobError(*e.args) + raise finally: self.sa.abandon_session() @@ -405,7 +412,6 @@ def should_start_via_autoresume(self) -> bool: app_blob = json.loads(metadata.app_blob.decode("UTF-8")) if not app_blob.get("testplan_id"): - self.sa.abandon_session() return False self.sa.select_test_plan(app_blob["testplan_id"]) From 5bbac35a1ae68a42893e2d802c96ba4dcfcaab7e Mon Sep 17 00:00:00 2001 From: Fernando Bravo <39527354+fernando79513@users.noreply.github.com> Date: Tue, 5 Mar 2024 09:16:58 +0100 Subject: [PATCH 061/108] Removed unused screenshot validation test and related file (BugFix) (#1035) Removed unused screenshot validation test and related file --- providers/sru/bin/screenshot_validation | 161 ------------------ .../data/images/logo_Ubuntu_stacked_black.png | Bin 23419 -> 0 bytes 2 files changed, 161 deletions(-) delete mode 100755 providers/sru/bin/screenshot_validation delete mode 100644 providers/sru/data/images/logo_Ubuntu_stacked_black.png diff --git a/providers/sru/bin/screenshot_validation b/providers/sru/bin/screenshot_validation deleted file mode 100755 index 5529f799ca..0000000000 --- a/providers/sru/bin/screenshot_validation +++ /dev/null @@ -1,161 +0,0 @@ -#!/usr/bin/env python2.7 
-# Copyright 2014 Canonical Ltd. -# Written by: -# Sylvain Pineau -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, -# as published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from __future__ import absolute_import, print_function - -import argparse -import imghdr -import os - -import cv2 - - -def create_capture(args): - try: - device_no = int(os.path.realpath(args.device)[-1]) - except ValueError: - raise SystemExit( - "ERROR: video source not found: {}".format(args.device)) - cap = cv2.VideoCapture(device_no) - # The camera driver will adjust the capture size - cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, args.width) - cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, args.height) - if cap is None or not cap.isOpened(): - raise SystemExit( - "ERROR: unable to open video source: {}".format(args.device)) - return cap - -parser = argparse.ArgumentParser( - description=''' -Automatically validates a screenshot captured with an external camera using -OpenCV ORB detection and a FLANN Matcher (Fast Approximate Nearest Neighbor -Search Library) - -Put your camera (HD recommended) in front of your monitor. -A query image (the INPUT positional argument) is displayed on your primary -device and several captures (see -F) are analyzed to find a positive match. - -On success returns 0. Otherwise a non-zero value is returned and a -diagnostic message is printed on standard error. 
-''', - formatter_class=argparse.RawDescriptionHelpFormatter -) -parser.add_argument('input', - metavar='INPUT', - help='Input file to use as query image') -parser.add_argument('--min_matches', - type=int, - default=20, - help='Minimum threshold value to validate a \ - positive match') -parser.add_argument('-F', '--frames', - type=int, - default=10, - help='Set the number of frames to capture and analyze \ - Minimum: 3') -parser.add_argument('-d', '--device', - default='/dev/video0', - help='Set the device to use') -parser.add_argument('--height', - type=int, - default=900, - help='Set the capture height') -parser.add_argument('--width', - type=int, - default=1600, - help='Set the capture width') -parser.add_argument('-o', '--output', - default=None, - help='Save the screenshot to the specified filename') -args = parser.parse_args() - -if args.frames < 3: - parser.print_help() - raise SystemExit(1) -if not imghdr.what(args.input): - raise SystemExit( - "ERROR: unable to read the input file: {}".format(args.input)) -queryImage = cv2.imread(args.input, cv2.CV_LOAD_IMAGE_GRAYSCALE) - -cv2.namedWindow("test", cv2.WND_PROP_FULLSCREEN) -cv2.setWindowProperty("test", - cv2.WND_PROP_FULLSCREEN, - cv2.cv.CV_WINDOW_FULLSCREEN) -cv2.imshow("test", queryImage) -cv2.waitKey(1000) - -# Initiate ORB features detector -orb = cv2.ORB(nfeatures=100000) - -# Find the keypoints and descriptors with ORB -kp1, des1 = orb.detectAndCompute(queryImage, None) - -# Use the FLANN Matcher (Fast Approximate Nearest Neighbor Search Library) -flann_params = dict(algorithm=6, # FLANN_INDEX_LSH - table_number=6, - key_size=12, - multi_probe_level=1) - -flann = cv2.FlannBasedMatcher(flann_params, {}) - -source = 0 -cap = create_capture(args) -results = [] -img = None - -for i in range(args.frames): - - ret, img = cap.read() - if ret is False: - raise SystemExit( - "ERROR: unable to capture from video source: {}".format(source)) - trainImage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - - # Find the 
keypoints and descriptors with ORB - kp2, des2 = orb.detectAndCompute(trainImage, None) - if des2 is None: - raise SystemExit( - "ERROR: Not enough keypoints in video capture, aborting...") - - matches = flann.knnMatch(des1, des2, k=2) - - # store all the good matches as per Lowe's ratio test - good_matches = [m[0] for m in matches if len(m) == 2 and - m[0].distance < m[1].distance * 0.7] - - results.append(len(good_matches)) - cv2.waitKey(1000) - -cv2.destroyAllWindows() - -if args.output: - cv2.imwrite(args.output, img) - print('Screenshot saved to: {}'.format(args.output)) - -# Remove Max and Min values from results -results.remove(max(results)) -results.remove(min(results)) - -avg = sum(results) / len(results) - -if avg > args.min_matches: - print("Match found! ({} > {})".format(avg, args.min_matches)) -else: - raise SystemExit( - "ERROR: Not enough matches are found - {} < {}".format( - avg, - args.min_matches)) diff --git a/providers/sru/data/images/logo_Ubuntu_stacked_black.png b/providers/sru/data/images/logo_Ubuntu_stacked_black.png deleted file mode 100644 index c807a37a61d6f1c66e8c7dbad59bbe94bd2f5e9f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 23419 zcmeFZcRbZ!{5XCux2(*_3fG7b-Vrjcolzo^y(**3Y_3fxva*Vhm6Z`8}XtKlmXTWc$i?)gaq`2!M7W{xeRMox>fgrgd zkbtKU$UgWKumFL${R4q4nnNIxuOJZmCvR%Bqyd27!7UXf$Vpgq2O9h$d7^6I0>}=Y z{(&l7PVt053|-ZfKIjZvGZ2k@wFnmo9&DFY1rZ$dyl@y zS;T4!-?(vKi#9}yHkRSOD!ZBz`^%?FN*t6l92^{{e>j4kwf+MaK1bMi`2B-(*PHTf z(`TaeK-UNcaAn&3Ib8(A%H>5pF*Gz26j0JM46Zh5YN!@ii}h;<(W~hMy$Cj-7d+6aZH_UQRYxyCik2HB#@BgmWOg$}SgkKFl<2 zb|0yt)4$Tak~r=QKMP3yR2{@!FY#2;vNO8*3~ozIWik4>>cobcA{u?I!)TDlgyJ(h z0nEVN$ID1dQY@6DS#V6qg!VHX0qnKg@=zg;m47E2FH4kti|{(&Gi6)Q?j;3j$nY7o zDm`(jN&q}KN{Cs2g*qdd_2_PmdcY-s%f-g4(Dk}fXG{oDPQ;lXaJd*>!t^Sh%^H<6 z4cS%ILS)z*0dU3olojh(np{R<-<1H@F^C9^xGdA#pxl*#p}@@5xW+Spga9^F>V-_! 
zFi9XC9#EsF>H*{k&yKV4us{b4byJfBIp02q$S<-O#?3ok|0K8x3q5uLbHLxAxsutKvOqD&jH>>Tq`|R!*#sT|AHU#rb`uNuIcN|L zYm$uL{Y1;2YMnzyo<|K=O{veptcmZ?vm0xuWP;yGfyGzGz3!>?e__Q25hj2y2^EnT zmm2GelAB2v^PWNE!w+S2kuvl>>(#D9fuw-B=I28gm+F`g=vzswmm?tZhVQ>M7U|8N zA^XHmrUs19GSGjkdnz-$-OHZ+Wf(-B>9Cg@5gcKAthEn^YTKyq=!;|HyH}#hMrzJL03-2uP=l*Wg+JU;Wuy2>3L}2GGJ8d+rcg-#hmfyQ z02;7aHs_CY|d zJ=Yj9Nd5k$jCrdF0Za`u!H(d}?Q%`K6?Q71^NXAkoajn|j9se$K>s16wnUv)_NB~| zR(=B5^WLW6OQDiElq}BS5P1b$S7KV|T|3NUY-5-)hg{4#BExK+Nu9z2lYIzBG$8e5 z&IZeSeKwr!7MsEh6*P#;Gqs&?se}IxqWjEGh!;@5fdK7ij^u}7u)#Um|YIORLh4|6TeH~&>RGU zgc7Q8MK4MqNhZ4$MBA6TFWI`r_Yb2SNg09NXsb7--3Fb=0PLd2}X=Iu?2Q zxJMiqku<+akonw7NC4wN@~CM}1}&}`nmI>3UdnWeQq10aiep!%pn}KrAT*FP^mp9` zqslHxRA-MrmLdGZLAxNxNfNpyB7VketdoE{pA|l($zd%hH^7Ce7(pc2&XQXwc4UO zEAf4n5H{HkBDx~0A8*APgc%B^f`XGU>`_(eL8&h7-6<<=_%u;ceJVCY8% zfnvXiS_;>jY@QZ$Py)yL*7*b52!Ez~OLdJoAJUR!aIKdKVD*QK>x@g+3?A#3q>s(o zmul?)9Z2{xp8a7g&W?O`?ASiZ33g5;HwY#_>CeW6R#_=-1-$7kp{5@3cUXuCoO0hes5^i zZ75+66AT$r4EP*OnLY1x)l9#Cg1(}!KA~KG4&jLFG30=r@IQvgI zh5!~CbOo>&u^KwY9e{^G#RFa-9nx81-zhl_(tLj;hb3YwmjD7S>*k8l9 zoF~-8kzt@UH_=Fn#`;V%>5Fp~WKe<~v72FvdFF@VcQ=*~)@O~1t44?0Kj=i3bh7d|HFPTHA!29ah?TwU~^Tf)5 zF7pVpi$F16QZNPqBQ`z|Xp5#2L#+J7KFQg?zcOTEy{k7myW2BSDtCbz{>1L>4M{G( zluuWWkI>$pacCpEMV;k#t$|YR3wBiSC(CNs_&lypzUb&9A&D^2-UoL*zM91wNk@`e zoQD!rZFAvZy8mrYMI!a9l4oOujqZg#QVyNmU8b_ zusg2W#dZ+y4(W|PTm6^pIY>T z7i_(|d5zTQ;rSCcWBKp(&_*!AGWO(EUiNPDm$H+S=<#qT&#!9l74Ryt_0=eYx8{Rt ztGKJ0-qvnsD9QZDnS;B(|jT*^V{Yv(!Vub7oU^veyA12tDRwkdsCaXu()Vg@uX{4>B@a+ zQzS8a2=I*cEF8|G?bT$oh>!lNNP}Rsr3BY;twLiGCYTrt7EFjzUD*0I#^dC+;0|v} zQoOBH>UzIP@wJ;sDt2Gsb}`S`;GAYfRfWrLILCADJ^Gr$T`M=H)*^XZNrCrp{UlQT zFv|GKx%&YvQ>2-;!%TJd=)}bWH6#PO8!^?7kt+y19jU10M7V45@}_agUQt)Gk}+rW z6apLg^dq~RxnuItsAYb}7n^{!go7uM^Zo6$M-llEg{~o)d+P4+Qv&)a0*JN9k~-_+$i=+y}!W?X$5&mm`yeZTH4H}`Zj;Xt=qsG{xrV(rrRrh;Tqv&vT?2;mU)$^mcIPrAR0IRA?Ticq}P1RC7{-e^gho@SM_Hcjtz@bMCi<}S2(nd zlDN@%6eGW%AU5S+IKGqY(~l`{npl?~2C{GDjY{iP=w+o>#gBaHwv-*-!lHSUYs;@N 
z_>M?j6$p1$T_3NMI1ZyeU1}lF>V$8>>8_H<>29*B{$hkg_~ws_g`$Jjz817umAm)U=+6({xn|audeN9$B0Gi; zMtsFsK&)*ncckCBig|G7)$iu_=J{HD*7Wtgl>9v+`WweL-?fte z0mk%5hL_vzAHE+f?sD32M{C6L(O0Tb3S|STitRVJE13#IlFufi+$(=rn$5|)4)LN_ z{R7PcP{$R?qqzCis1IgazG1CGD4Pb4W=;e#)hmh1SBvNz%2u|obh8K+mlorKIT9+> zZBF-Uk%&uH6~nCUraNAB)JtWB_9aho2Z%NIA!95w^coPSxoIQUYZ?8%BI6F3lX<{8+*D4yd5vOfDd!WZ6BR5L1T z_JFnCV$+~7Ej`I|BWp6~YAeWFC^wU+KQ7V7@4jkPt7^CLt6V%7BrqvXV11wpC4g~j zki1D`v+tjlU4(H@`qK^?;{B54lKf*?EmUOy*mPSq{S11@b(cS*o$bW+)@Xn$!h^1U9$njgi&BIO+?EKdJkvWCZ%=Tz z$%ma(R|P%4AjHhJf~tzk{yiId;&BaSJw7B==TTYKHdpd`cmY*@gJm%*Y+Y(yf@Th#8)O#~EaK4k;s@*BB@#GOIdjWu* z_46M-yh9vGLc1xWK4M%whJANjD{yxdzW{D3TX&?b_TB3c_rO#&bEFe8cP*_emeoe} zIst6B**mS-ko;hXbCVBETyT7ClyibUeK9+Xi6#mXSnM~EBsVU;ocEQcviBo{%**;F z4;4x^N--{QCNPopO#5m>?&#y&v13;_dA4!*z%RGW786X~rW_zQ$eG6Tfv2Np&xAwh z?MY6%UGDhi=xxCuIS^%v#n3MLA6oK~S_%=}704j7$Zea?TbXtEKXa4jZ^6krkXVVi zkD{-N3U}GM$7a;1@~GfFxRiNbyAmy-^o`M5z8;J#NoyV?DdV35z$N=FRst=epQ6R% z!Lv`$8WXAPEjHrFe$$Y=RuNYD zj$t`t!bQI@JM1D2aFfJbpDvRI-Xn_R*Fn}jB_GxN=?_r#k2Wq~*~4IfSIemGYTGyR zMDu&gZwaT#x1B<}DVWePtP*koauI{#OpMaOPq-{)F2}vNW>GR2FD-zWi!p`~yI$Ooa1mYDS<_$i3!o*H!g*}*r5_zABYW7Z ziK!O!=UwK6^@*1J(vMs$-W@v#;)l?)m|%*w8v$}H=Vh#m$%n_5wvhfWIUDY;9F4A& z`-|VAl;i?w0v2)Pn@{2U-Et+{5+5YD0*;;5_iS9_4!*{m2UK5dU3a=Yur~fEddwx) zP?mQdxold5d#mUSEH=3%G-0?>mEpMaLn;Y3QL`J>GX8<-0w0Im+l+~&+Zz2f?fOe-g8xvyHu|ItgE+0W_U98jyC-@8zYP8x3Z8QU4_o~W>@ z;Y=F9sj|UsrL4*jJHLZh!V@-;NO~--|J?!?f#I+()6V+${-9EfY`UiNViVa=EbOm8 z!Cd6%XGXW0y#1xI%6r*39rsC5n;QPi;qw%q;?l9|?TdUaycQ3$9UXAjvubjW7UKq` z-J^))??9nJ3ly)r<*hz8vo75e&0Dg>28LyG$We_Eg`U4ts$BTmjdQMyC>41gsUJ*=~l=kzc) z^dy8OPj8l`aI7M`S4Q(%r`d$Y&1h*ZS|Bw}NXNtQ^P9`-eWPA z!2JG=>U@s%j~6MNe_=`hR^{kay7!IzES;>oHZrvHlfhp(J7>*OSguJdU#dwUoQc7m zOK}p(4r8SOId=;(yj805!381}v+9O#P-msnc_(pyO;xP_=md^zGy1`AMkpbGXta8B z#F^2`4Xh|IkmB~nOsS+`_k#?UQEd9tv#3oVTcwWU0Lik^#p9>dQw4RWlt!B)P66BT zb05nmYMSmgR_1!<3sBND7bqeZemQWZy36ohM@`f1#!B&uO(KJDT)JC-+3S9+A$6=}jD{l9W&Es&iDfTCS-3*l_v7%#8r7vaY 
zZN%rl&WGh42z%l}k`_2sFKQ{3iRMyz|o(0RIeL+MTx9h%2}r(+gMY)eSN0;L0XuLP@W!mwhe zeLd1Qb9Z6n)akGEUg@qquGvn*^(ZQUg`w`>Ir3O{#YEiBs6nDh#sAdj=S(^`rKd}5 z&2Ij2ASB*8%}gM9MC9;nHg7@qHqVsRB`N<$1M{Z-CL?^m-6gXxJ;#0knl9yYx8$;{ z@oB&y*%w`M$wJq!#IbsvF4Nw7SWU#7f9)y*t6r1KH_fWOl-mz8j|f@{kMKoCe?pJ- zy7lqp>E>qgiJqcG&F&rXl;#cF!yeDev!W{=Fl1 zOi2f>{4-+4`S7*vO0IVFPEBrFQGQhgZt&>C9U9jjR49E*lPNAhj2? zFnrr8y?N`b0KOvL&eHyexHkIj0dFUbeVLZr?;b z?>F15<^@3HU5z4h-+B@D8dckVt)f@i8)oo7Z*%UWP#_L@u0#AU9-hwmw}ny4cGF+8 zsV7*@^ZK3>>fw62oqEsZ>IqZa7T7}N31#K=u4*~M5H(>BihZ+UrE;B)srM8D`hH z@xO0a$*-g7LPmFHfSX}=83-4*7@4%Zug7Hoivu*0sxqGbN>NT2H? z`I*N-P5rF3I|;Z_#RsRSJ~tHA*dasRg!S4*j9G~%x_gx+k=5<=>?Y}R962$ZHB#Hi zYeKn1v@+c@`@y5G=K>v=cC~lym)jhurnm9a*q2^i3i!j#KRY-psbep4$IQBcg^;7bHo%|M0?b&zkwS;vg6i-eV(vaQ@O- z!k^=PrFW_+dDs1FgR4JMQpmU0FO?t;<&drH4a8KB7}xg)9m~}y`*2rO-yU(q$fMh# zr8F0Gz}f!shNL&+n?|P5pA}*5tRg>T(blB}*+ds#0)lBvD2>tx^_qnpAy?*#{iRQo z6!i-g5z~Lbtmtke^O#}{k7&eZvB(gjM3ef$+%3xMT#Qfx#K@t>hlym|h6dXA+JIG= zi~sJ*bP9%$LJeFyqbCX%LKwMymv6Tel0<~5yE_C(ore*C& zqbyW6dD+k`VNgc*7Ns~Bc)Yw_wV!I4#OSuk4Q`0e3B75)Kda*O{sJAr{qdaXphp9C zM(8;m!<8VnhJHWq$F9s5or58P?}F2KhC7u}hode+c$E1^GjGJsCO>vER0-Ux+PBzt z?sg)*O6Fhd>eUW8j4f9s$w%WJDT0E8p8i#@f-&pw?SW2dwfVWt3v&>(BSE!!JkDIP z41n=(=Bu~e%NV^zxuW6bjifz6^vZbyq_N* z;zx`faFxCKMl3q?sIJ>ZHcO-qBD|Y6@8?VcvjL#nThfvJ+qv0UTA~cc@0jeiN44T+ zak1bX`=h`XVp7KTNAH&3Aj^w_W*i-}t3K6bNIFUh{fiGgz9d@^aU2WXiqo z%fT^JdshLO$36*czU9kXwVwG{)Bompk$%JcJE`(6Z@t^W%wF;VauekB*22?;@L=Zi zI}R~=k?k_W9(tX+OA1loX1`OsW8jhCWG1VrA9O{0Jg%f}$L#}7P7wo?;L&`LZEWi& z+~hx;|EdmlB)q04M~c2hd5wz&q-u`W9Ljbd?%rEVEn1e@8uRS*w)UbY{sMB3r9B50 z7cO31TgiPc5rKcYr-9owd+Qg4`$ut#8bHTRM!n;1c%aowkM${V<1X(#CXDp z)jiR%RaDDm3#*|@T&;Zw2|nl2@_}wYM!|vae3>rs{9#uk+|wc}yz>{xG^t89lns8Moo@-l#go!3HD<_ON zGS-*BlYmxT&!tnuyDCSD(xgTopy89Q&lTvm)hPeThJw813ddG5cEn!Dd(6=>IHyup z_AJo^b-u`sIXigV2$i9E{Ucm=&>c~t`oj*t{xc?ygQt{K^X8wl=8-{iN=+ z3+d%2d+c~cq3e8zx)_iOQCA!7b#0*8VqL@S!CT)<3x*#CsmIzozEaizCZ0W^D^wg{ z-tl~c*3g(($!?Y0dJ|cm9vB-Ks_gXBYBiYYLv!-U+c*r4-19fdn$q(|6*H;*IW!L)!gU=I5VrJ38t9 
zOv)y$lmDGPezoO!`~It+Qo&uTg7kEnlxdznW+~&Q4pFP$YH!$Vv`AtDLp}n%uF_?Y z6yL$;`dPQ5D6{KNzW6WryVPYlh$8x?OKVgt(;(G9{r-$CR$0C`jkk6^Fx(DY?G`*~ zDL2`ceP<)Nb6sFn;(3xPxzjE`>1hRqLfDUsgT+IATNRdK!rS4Lhx?8x72}0gzL>R7 zl>{ZJ=#F*nGoUnvfJ~~1yZVa)Doy5&1G7<>ztv7w){7eLiH(E^>#i^(UOmH+!6djW zcwb`R7w<&n_I3Nt7OQk$+^ZH@^H?~d%1^7r{6wmM>TViO)6Yv_ys64b023RhNtq*U z7-TwnW9&dkXNq$@3_jrQS6-0AKdqI0X4{t6>AaV5FhX&f*;hvYNL2_?*qCn}JA6O7 zBXw`$`c`LzhgqMtAFB0%;>c}H|14idvqr(!A$&q4QC>_^tq=m3?rLHRmh_(1V9KIE z)xynt*|U;S_C^Pb=Wo$_PiEVv+xmrtno*7i+hc`* z&s}0|L&ZHlk_z;>2?ukALuGC!xb25kuEwq>n;!n3Hl2^X^3Oa7t>ZPK&jv5(x3X@` z*Cwvp4w=OtjKivfQjgZR2QVtTh0S9d1A(_rDoSpDOcQ;6bU&2q^!batm!#w$zAN(k z-|Fv9dt}(G)S+{x?q$6TJ7!o;=P>FD3t0?u1p^<;Z6;=r8Fm*O?08lSnFwhY^l6HQ zEUO1E;G-2X80I_MT3HETm5J?LULgZ_3&vUas(wDaT1*7F=Wfj7)na&!>}fNVHhB6e z+6vM{nPBp2QmRm+;xLWxhhl14k_@p3B735hoGxZvOiPD`nG_J;?N^a>@KdKGM6Fpp?mcs>E_@%s0S{`co<-cL8IDc|9 zH{3Z=<#=)wkwo8FAk-RXFm?tD{Q0>{&{`!5Q*lyfUAkQEU4c5bDa-oOT{iVG!9URl zH#4&GMOHUR5u-@Vd-{5vQ?@lRvFKsE+OuPMvNfVBm2c^>r*3aBba#8&cP%>Uy-^qr zqQC?yBBp&5{DNqM?j`Mu+}TEz79EpfOt$O3-<#mC-$XApu`-fSL`1P>(}LIdM72&| zPIZ)L>fun?3D$dQ?1dfj^_)sr1=`k7zS*ZOBltUq+|SlMri^OG>_wron(?UYCy{99 z_Y6bGb=RkB_Efx_vT~r9IFR3#%c`E#f#3TK;>S`E!-#U)3!Aa?*c5APvhPb*^W}qG zL3z`Q^%2~cF$Z)sb%Ix@ld0KvA@su0O;wYbdZ-?t(;C7Lo}+g9IAQ+vJpI2F-~R)zO-p?}}W! 
zAhf@dI2 zfX`r=?ND4oYBk*|uxg2CZ=6u`4x1g2iK-w@Iie&7g+f6LT=Pc@qz&K2yQ?SglY=Px zK}~Gr*2RG$j)#P&6=M`xIeqNOQ!fTP1{m`few42088|u``$(CMaEgeX){afB8!6~r zJ6$ zyDM1>>RiJ7j(_GGW}2_46ofm2;O9D<8NFz7^`_ z%sz=wu`WzLM0~N6ha&KBmthB0Q};TxN|;YniQk zXi($pP%EN>ybON`6_?`oEmvVz?L1E~Qm=SnT4V3JeBeUxtXk^-I`8ysy1LR@FY zn|jklqAVMhE0RlcrT6xGIJ3a$Ekzl}xgwbdcM8XQS0&erae*qjpziaLugv*u>>I!Q zi-ISc1-SbvR^a1f6Y$aGKtLn9+V;=@8yvL^@b(k!smDA{_UM&aF2?5OP)Z1cyRlh` zTKBB*+@qtjUf)d`BeE&4pFZ+h3!Q%De)HToqn^T}bNrI;r`X3ZVjpj1z_1pY|AB8* zl+)mXG&VTW4#e!#MzXJuX_RW2HZmG>E$42s*;rfzRlpZbIW!WkqQesB^4hD!yL#&P z$Uu$0+~-cr-RgMeD841pkw4( z{O`hM1=IMn=8*oV*G_uY*7Gdl#(lJneS4{xF#2ln&iE~5&$$0WYsO3^s zPm7cJ*b3SYoMm|AA8cge0&kd7!KrYO?JcbTaCzR{FXaG9Kh^z2n3KTIqu}*WNMN2X zIp(}W^Y=q;U)5Mpw`Y;c%7d7-y4iOhOBgN4UcrOiZIY_krxZ3nPL3}X0s z>)Cyu1Vn&(`++?Tzo+s2PRjc|kR&j=bV6CJyPrhoqzHE(D#$4pV?o$n&0_oM?N;5N z?u4Oj&DgZE^OoP&FhsCjB4|*ycLbj9MBBLQTdgVxEzdn_6p>RJ&v=QlDbM@^IyY!{ z-NF5%a2`xDxxsO1_wl^tG=N=;Go8qDPqQq1Xz& z;#@ZZok5dEON$!_2+wTB4I`cc6>}U(_!R%MAkA;${+56MmhWtyh83O3W~l3Egd11>hWnK2!^8&*VlnmFk)@2zrhI| zW;@IEdEg!J(`Kn3U^)7AdIyfj4Pf=@@rmzn4ms(@v@c z@bznqCKcVgP{=03YoZMoCV=q{qOIQJ-mj@IE&5@d37CKc6N5va(qvZeVigFGpe?9j zBc}q>ht4VJ^2OYMxq?onXBGAsM)M4N^7-NzP`)Vqa6ZV@^p4Q@=a-jGd&3OMoRGt; ze?slLQ&eK@Z2=Mblv}mUF$Bhm(R>ZKy7w(i>g%UA0B= zS;3GQg{mG&Dd08B{fhePTFmenqwIFV>p+@XznVfVNxpA+={ps!_aR`dSq#7K?~8MF zbaX-v-p9U#bWy=uYwCp@HJd48_oJe_)aVJH0Ec9~=({&;GnCF6mo5c2fqfPwfO$DR zaSQ9P#P1K8ymtvv2d3a5GFy7IvG*R?bA3JpfsrBy#s2c+xT8{BGLmvqOUkQP&v;Aj z9gsiAg*pAr5W}Ej?A!XI4?>!^);Or72w;1nrPG~VZFs?lTzRRU^Ze*-wKayHz~x3q z4))6S;+P_I+qB+0E_qI6rCxa-0XjSa*yr7SwwK4sOB{+KST8jVCq6HC@SkfmtI3g~ zRE5h0P)D5{<7@d@QK;!q?25fTcq@v;(~BDKnw=jmel3$bT^Z7iLLCn_z7Ku&hV%dD z&3Nn4tXK+&>Ujbf{ZX~mB<`JgysJ0_ltRAr$Q=|+9iycc1pS7O;BuRV-qV{=ADX_< zf!0GN;uz`05ln@-o!fm-As=v-fXnPG=$h-%H3E+w(6@n*wxrZEp#19>1WW)v0$v>; zplzUy9()ynsn%1$A#d2_HbWrLpXZ=2g%Tpfx!2b$_FeZ(K`@PuxMuakrqF38Jji36S{p}bVcus_)T97& z6EVAf8Rh->$#GH@YnnujCv+9vXwQr%u1H%57)9Geg)ja}y z#Cb5!3!Rho@RFnWc%s(o$aZV-$LS^7{9WTS4pk&nud;k}<2gl|pcvHq2AlD`n)z4d 
zCYnwG%f9$`7g~lkk_L86R>YtLF7e&r&KB>)G94=Ec{%H6wdj+qVu(c;sk<{P90l|A!qR zx$xDJ-7blTwM(#P+IZ{4+S|W5IL=)uWVeRWge~F|Yc&F%ek$*_`4?#%gi-k5O@Hxu z`2A$cAItxuqk$)hu9OeC5pe0?H81>&%6g?RBb!JyWA{T7m@*ke+2N4_%=*pxhJ-Ll z4Ejuar$;K}x%SVb7ytdv@5&%6b~`|6GZgD^9{!KY-uufc+)(G7{CAXeH1OU!t4|}q z1GQxMzs0HroD&&(LhVezmHc@0U95BI!IMZIoR1Y8A% zS^fNfO&AB^b<1i4494F6XUP74a438JFdrm|Sx991ub`Z$9P|=Q)wRL>=wEZF6If)M zmu~|UOv!j55%31{i)pJl0sDy!S*AwNgM*%ujU7t=8)*AhzkM zx@Z&*X@mSS{9RUfQ?-;U(sQn>6|KNJIP6AX0m?%-5NBk>C?|@-whk!{&!RkRNL6$aPu>nX@u*gY~5*YE`phxvMQf) zt9}%f-}vhj>ed13!?uHcouZQ>#u@8MIC2_z;~^`%yy^EL_`fUts$6(sQ7!VHZ#m-+ zaJO)<)+e$dC(X=RA`|%6pCyhv zqk81UlZqIkm1&~OlF3vJc=5#dkz3LuyU4$5sE;0=brHYqX}>_Lr%&tl5T=@+*3+(@ zC0Yscxxd579(hbd>ZC`CZdl0&(Wel`$-bUNn>v=%-zX3__H8bEL4`lDyM{;j6jDKj zF<(ok>S=t9{;sgs^AN^5DZ6m~Yd1z|0648w>ch2fAVv8beKkeQ@w)hJUuOGTFjYno z1)IXF;K=YNG!6p9IaF52kL1*F%R?sJQADixX{`R`H+vK@4JlvdKEa0@pTblnrWL}O zk2pZ;{dc&Wla}k!3F~kQnT@Sf42a)yQnjB~83!$J|ANozJ?(I!PS=IYeZx{3(D3iu zaR1Bm4up}|SE@DeatgcLx8x#0`>UIw|8lhkxGJN1;1pj(1-C3B&$pksk?^k{>_9Q} zl{3n>egQ*L8<8T5|1)z3D!eY= zjBC{4xYu8EbIF~Z&*Q275)WFmO^F&L6BceEw= ztcLMFQ;8Z^XO3ii&cjsC*e_y~tLt4sOW1EU(7=O(Df0p5tjQF(9N&UYlGv1C0_b76 z^Cvh`tmH|IKeeDEqi+>}kd~3UX1Yw2|m`Yq7&RPL-0Dt4B;H#XpSyFU} zLm(vot)9scBegh4WBs-qto4jzfpztltzR>>PHLFxo%sLiUM;}*iR8>|;b7S5R3%?# z-S2v5{}aWv;>wzc+wW7u6FS_4`TiOu4pM#DVrT=7%>J(i)LQT1woBK}!xM0M{IxQ};Oh9>cL75)!$Bq~Fx4B2Y#waNBma6P z*y=_L?S$)Y0#N_A(DA5B&^~{vw%YvOdx~uoF-fF0zU&h`a(o4PVmQN(k^dpr_AY36 zH3pEQ6Xj<%q_YPZoWJ{+vGe=4+m@|6Br)O5yv#_kVetQ;{AGnP(+_Vm*&kB^Cf?Q+ z!|D7NsW8_}MddBUdW`^`Yj=lwrh9>N{%fchUCWe=yR=a4SfprE$o~SO!lNg8UcZLq zKT0dhILNXGZzg|r#=dx7Af%1m49H;k)^4Hx5%YhX^%*jm%-4k#s@;o@2kSbC8orTn zdJqCfgU!6M&K^C`?ndXv$IKxI5-3xOzc+8dWF(yY7e?frAu6adBb0{i&$3}JB-KR$ zQb?u$JD`MrghrwZ?SE|33$M znGJE%_jXdlW2yc}W*TQ^A?Sk_|Lbh}1pCd4&#JEwwIV%$j(#ghWLeYJn*5TqT$)y; zK_2;D4bl`nU?NKrBm9-XqNq0XFH4N>Za+>Xq{3g*dvxw}So&OXI*6_hpqn55XBas8 zc&QFq-FIyr?B7EE7*V}Go1OXtV%;IN~3 
zMQ+@Akm^5biA%7*FsxW#oHkhEJg7;KeeMIqtJ6gBm%g}#(vL!f>b(4j5nQ}tILi;$t2Vs^c_Jh=cn-aLD*!l-6KbeiSLj6hk{?e-C&0lCoGwrS~&q@CtONYl#CXI<^+wlxR zF&TEkJ`G7Li)-ob16sc&3oM%?`K#~md|`9vSDCaczdFikc75jEEUAN@OIln}fN zSG2_Gc#9Bc&aE+grA})^v&MDXXTN-1_^!k?mSa-A%D4qcwE^Nb?eR!W+XjO0&d1& zN|B|)Z*Mv|6gpgZ&;DylQa@d|bNCF??AcM&_3OFN%|K^Pkx_i*_W-2D=<<7Z0xp^O z?r*t^{)GIgz);+6&`i^-LYU{_Dg8$OS2tJw4|Ui6NtA3Mx$TO=En{bFktIn8W!%P+ zknGLKXvQ)sOZHKy>`V5tWEmP!L_-GEt&rhFU&ih>N z^FG(P&hG(gS>OZ=KTJl8aOda5C^_Z(H7k5VeRh6Da zda|sR(YSG?{V2bW`gWD}Ah{e3X9!Gea9Me$XPmqD0RS21W{zaeAIse`ZmZXO`KhS% znN@+LaO!?C3I{|exVy4>rb89CF^en5&meDIgZ_(9Ag7k=q-5D@JsoDSfJsFp5QN{p z44t^IUBe1(ges~NLqelbich*CwHbL2BMfKWi9Qhf6+D#mHZ7V1n;N4+kO{Pi$ZS*T zvO-csrP1=}nZUZa?-DoAa_FNjFb;64&m7XNWYvt$pXxAe*C$9cRk#2UId~SGlIj2qhL*B+9}AIKekH{8 z$B{Q@=p4Se6IYA;&&IO~Xlb5m=ug&&_V4Y%1;TkyD4|P4rK*c|qnuM6=Y<(*5<{@2 zjulyjR&tS$7Z)ctYPG|`_QI)dKmWi;Y}2C%-n|?G3upXXJ~8A$bS=Ki`9~2y(~v{Z z=#76?Z|mrKarJPa>>FdQ>sEyzMVsot;`epYtb^6nZ|ZMCz8wE2sp0zQOu->&e6$wH z2FMGx*FrV3%u5J52;#gt5FK{9Ku^-*1P9h*VSJ=zaqYNbM{JMhF-BU&pf_?ljw)6T zs~wJg`|uSz)+o%;;>5ee)+9bc&f_@FU@>Vw6W{$tjO24&ab4??QSC#m^-k>tP3i7m zPAAwnMs39CZ->VJtg-_|2d;%IM$Ty&Sx2YCVlk4bD!NO}4!I41kC}cTyfw%rjOxS|6N?S$$dWPV2w3{x|}Hg-`t z@f-UlbQ%rcf%e%i^NnPa^}IPzz8atkz4;5mGXmCP+R{1c-k)=87gz|L(wuIT zf01)@mB&S-YU#>qgU@h^G`v>74oj@+*MjCsmli zg^S1^8SA#Az5u73P8YK_jauStw^^v^`LGoI36!Sg8q{{+n!@G@e&Ak3R{?r1fBZb1 zpCvN?p8jU{2z}9o^c+o*S2qZsaY^l{vc{8Um}3F+4HGS zS|_P45?%_v*LQax&Wl2tD=f{5q6Mn6bv0y*TI~#asr;cn#L zLp!BCxFn_|V(Jd5 z3Bvk5_T&~_CHC1>s>TIVL}=vJQtt8_P5y+O-z;z$-+!uTs~HJ~t9!`#ERe2?`9BsL z{-a?fvD>%0hrk`yrMZ*qTQeyZ$+k_4EBDC!KZ4J#?`E{qC7eTMXIK3fTW$*y%|mHT zk()!g+Sf$v-AW)%*nlW*PSMhSw^qbZ^}7AqF3HeQ&74y9`5bID&w!tLmFgApEGS}c z$gnym&{}e`ZIc2zO@a_#W$NbQy!4Isid?%e<#%S<-Kakk+N1WiA}QT7;4;m0vHC_^ zC#1!8Gl!D;%JTRFa)Q-{6cJ6`-_Cafwnv29ryCxp{XC@URnuP}u@!WtyshA`7mFzLUvS50H3`@&vn~_qI-a z5v0Vod^b;j&Jg)_;z#gL9g3>I!Xyi%yoJz&wg0r2x5=VY6C!HSxi0@kYLFH%pL94q zFCSuU23s;SSt`&@xF1g+-xjnQVUu0zYU(aE^QFQIJFiBK7KX2gCocvyt0-`ye7~68 
zROWO6Y5`vIp)MwiafaM5^J{bY)vU(mR2a@1A-`cGEx~H%S9>%_M4Kzbkis|xfnLiX zoUYikA8$q_Al9w+k)x(CYZhfk`?i z7U*_GT+o+acwMXYJ?B zgcCt`VN>_x(6hbVcVpJOO1Ce$fsCL2B`t_twc5@HHW$8Bv8kO9%FM(lq+v5VsA0m$VUwjq_Y3 zOfytWsd$W@^E*trSTW`OqRby%j-woxUrKmKA624|^k0)II$hLnGtS6fP_pr7Yf?3h z6J}SzRd6LaE><<*PHv)DN#gQilHNU9yra}sMRuD%AqwM>W`FcbU&^G!SN4xtJ*h~% z-{c!Xsfml{^ZcvmHsXA|mDKQn3;O~ZeNKhQ^>d1UdV(anP!pC{^Uw=M^eQWeINli2 zCm}W3E+P2%=#yhqzln=kuHu*izeF?Gfye3sy9cowY*n`JWqEdf1&ZN?E~oc@53|yH zmcb$-eo)d0d|7p;CZz7s&l;|rNjZKEoa?*HrF(HKzumXberf*!sFG}-%2*CW!nT9J z5R>}sc9LJgwSb88MxQUu{9TM=(JV(T~*KzLd{F2$QWm@PX$YoGv3&Qir2>`kur z)9&==spn4`X!TaSTBy@*4?CX+$f4~_Y7syF^Ew}h9215a$`vvgYt=5!mITXb{7u*7 z_#R> z_Gn*hn5hpaE5T$7m>YrzRqG0uusoI7a>SkC*wLVTAPvVAYVA{D%Y!SZEOqza4@e~K z_@aqI;ii|Oj?WV?A=G^rOL|k~;v;QCVl(yDZR?>tgSEwOKDUUUX&AjyoBWF{ZtzgzqG|JQd;qaSM?XzAih3}zIg zW;u7?%<;ab`+YB652zR8!=k35s-dK!s-&WGLsd;zLseH*O;JTvS4AZt>VfG00R#kj a`rLv4cYrwYLn#BmVqs=&T5Wvm(SHCF53YOw From 4c07bd097548c89623521433cef9a88f307368b5 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Tue, 5 Mar 2024 17:15:31 +0100 Subject: [PATCH 062/108] Autoresume checkbox local (New) (#1034) * Add functionality to context to reset SessionAssistant * Support autoresuming session by id Minor: Imporoved outcome fetching and resume api * Update agent respawn to check local behaviour * It has never been so hard crashing checkbox * Allow resuming session from init state and load sessions in resume_session * Allow _resume_session to calculate the outcome if not provided * Improve documentation and traceability of unsupported error * Update and document raises IncompatibleJobError Minor: raise better exception (the one it did before) * Black subcommand * Fix tests for the new interface * Add tests for subcommands * More tests for units _resumed_session and _should_autoresume_last_run * Test also _auto_resume_session * Test resume_session in assistatn * Black subcommands * Remove pointless ifelse wall * Minor formatiing and using default value args * Removed call to 
os.getenv from tests --- .../checkbox_ng/launcher/checkbox_cli.py | 2 + .../checkbox_ng/launcher/subcommands.py | 156 ++++++-- .../checkbox_ng/launcher/test_subcommands.py | 337 ++++++++++++++++-- .../plainbox/impl/session/assistant.py | 16 +- .../plainbox/impl/session/test_assistant.py | 35 ++ .../metabox/metabox-provider/units/resume.pxu | 4 +- .../scenarios/restart/agent_respawn.py | 32 +- 7 files changed, 521 insertions(+), 61 deletions(-) diff --git a/checkbox-ng/checkbox_ng/launcher/checkbox_cli.py b/checkbox-ng/checkbox_ng/launcher/checkbox_cli.py index 2c58f89433..c16ab3fa63 100644 --- a/checkbox-ng/checkbox_ng/launcher/checkbox_cli.py +++ b/checkbox-ng/checkbox_ng/launcher/checkbox_cli.py @@ -56,6 +56,8 @@ class Context: def __init__(self, args, sa): self.args = args self.sa = sa + def reset_sa(self): + self.sa = SessionAssistant() def main(): diff --git a/checkbox-ng/checkbox_ng/launcher/subcommands.py b/checkbox-ng/checkbox_ng/launcher/subcommands.py index 1dd4955614..1da266c14c 100644 --- a/checkbox-ng/checkbox_ng/launcher/subcommands.py +++ b/checkbox-ng/checkbox_ng/launcher/subcommands.py @@ -25,6 +25,7 @@ from tempfile import TemporaryDirectory import textwrap import fnmatch +import contextlib import gettext import json import logging @@ -38,6 +39,7 @@ from plainbox.abc import IJobResult from plainbox.impl.color import Colorizer +from plainbox.impl.session.resume import IncompatibleJobError from plainbox.impl.execution import UnifiedRunner from plainbox.impl.highlevel import Explorer from plainbox.impl.result import MemoryJobResult @@ -258,10 +260,8 @@ def invoked(self, ctx): something_got_chosen = True except ResumeInstead: self.sa.finalize_session() - something_got_chosen = ( - self._manually_resume_session( - self.resume_candidates - ) + something_got_chosen = self._manually_resume_session( + self.resume_candidates ) if not self.ctx.sa.get_static_todo_list(): @@ -340,6 +340,58 @@ def join_cmd(args): lambda session_id: [join_cmd(respawn_cmd + 
[session_id])] ) + @contextlib.contextmanager + def _resumed_session(self, session_id): + """ + Used to temporarily resume a session to inspect it, abandoning it + before exiting the context + """ + try: + # reload the list of resumable_session in SA + yield self.sa.resume_session(session_id) + finally: + self.ctx.reset_sa() + + def _should_autoresume_last_run(self, resume_candidates): + try: + last_abandoned_session = resume_candidates[0] + except IndexError: + return False + try: + with self._resumed_session(last_abandoned_session.id) as metadata: + app_blob = json.loads(metadata.app_blob.decode("UTF-8")) + + if not app_blob.get("testplan_id"): + return False + + self.sa.select_test_plan(app_blob["testplan_id"]) + self.sa.bootstrap() + + if not metadata.running_job_name: + return False + + job_state = self.sa.get_job_state(metadata.running_job_name) + if job_state.job.plugin != "shell": + return False + return True + except IncompatibleJobError as ije: + # last resumable session is incompatible, produce a helpful log + _logger.error( + "Checkbox tried to resume last session (%s), but the " + "content of Checkbox Providers has changed.", + last_abandoned_session.id, + ) + _logger.error(str(ije)) + _logger.error( + "To resume it either revert the latest Checkbox snap refresh" + ) + _logger.error( + "or roll back the relevant provider debian package first" + ) + + input("\nPress enter to start Checkbox.") + return False + def _auto_resume_session(self, resume_candidates): """ Check if there was a request to auto-resume a session. 
@@ -358,12 +410,17 @@ def _auto_resume_session(self, resume_candidates): ] if requested_sessions: # session_ids are unique, so there should be only 1 - self._resume_session(requested_sessions[0]) + self._resume_session( + requested_sessions[0].id, IJobResult.OUTCOME_UNDECIDED + ) return True else: raise RuntimeError("Requested session is not resumable!") - else: - return False + elif self._should_autoresume_last_run(resume_candidates): + last_session = resume_candidates[0] + self._resume_session(last_session.id, None) + return True + return False def _manually_resume_session(self, resume_candidates): """ @@ -405,7 +462,7 @@ def _manually_resume_session(self, resume_candidates): break if resume_params.session_id: - self._resume_session(resume_params) + self._resume_session_via_resume_params(resume_params) return True return False @@ -432,53 +489,89 @@ def _run_resume_ui_loop(self, resume_candidates): self._delete_old_sessions(ids) return False - def _resume_session(self, resume_params): - metadata = self.ctx.sa.resume_session(resume_params.session_id) + def _resume_session_via_resume_params(self, resume_params): + outcome = { + "pass": IJobResult.OUTCOME_PASS, + "fail": IJobResult.OUTCOME_FAIL, + "skip": IJobResult.OUTCOME_SKIP, + "rerun": IJobResult.OUTCOME_UNDECIDED, + }[resume_params.action] + return self._resume_session( + resume_params.session_id, outcome, resume_params.comments + ) + + def _get_autoresume_outcome_last_job(self, metadata): + """ + Calculates the result of the latest running job given its flags. This + is used to automatically resume a session and assign an outcome to the + job that interrupted the session. 
If the interruption is due to a + noreturn job (for example, reboot), the job will be marked as passed, + else, if the job made Checkbox crash, it will be marked as crash + """ + job_state = self.sa.get_job_state(metadata.running_job_name) + if "noreturn" in job_state.job.flags: + return IJobResult.OUTCOME_PASS + return IJobResult.OUTCOME_CRASH + + def _resume_session( + self, session_id: str, outcome: "IJobResult|None", comments=[] + ): + """ + Resumes the session with the given session_id assigning to the latest + running job the given outcome. If outcome is not provided it will be + calculated from the function _get_autoresume_outcome_last_job + """ + metadata = self.ctx.sa.resume_session(session_id) if "testplanless" not in metadata.flags: app_blob = json.loads(metadata.app_blob.decode("UTF-8")) test_plan_id = app_blob["testplan_id"] self.ctx.sa.select_test_plan(test_plan_id) self.ctx.sa.bootstrap() + if outcome is None: + outcome = self._get_autoresume_outcome_last_job(metadata) + last_job = metadata.running_job_name is_cert_blocker = ( self.ctx.sa.get_job_state(last_job).effective_certification_status == "blocker" ) # If we resumed maybe not rerun the same, probably broken job - result_dict = { - "comments": resume_params.comments, - } - if resume_params.action == "pass": - result_dict["outcome"] = IJobResult.OUTCOME_PASS + result_dict = {"comments": comments, "outcome": outcome} + if outcome == IJobResult.OUTCOME_PASS: result_dict["comments"] = newline_join( result_dict["comments"], "Passed after resuming execution" ) - elif resume_params.action == "fail": - if is_cert_blocker: - if not resume_params.comments: - result_dict["comments"] = request_comment("why it failed") + elif outcome == IJobResult.OUTCOME_FAIL: + if is_cert_blocker and not comments: + result_dict["comments"] = request_comment("why it failed") else: result_dict["comments"] = newline_join( result_dict["comments"], "Failed after resuming execution" ) - - result_dict["outcome"] = 
IJobResult.OUTCOME_FAIL - elif resume_params.action == "skip": - if is_cert_blocker: - if not resume_params.comments: - result_dict["comments"] = request_comment( - "why you want to skip it" - ) + elif outcome == IJobResult.OUTCOME_SKIP: + if is_cert_blocker and not comments: + result_dict["comments"] = request_comment( + "why you want to skip it" + ) else: result_dict["comments"] = newline_join( result_dict["comments"], "Skipped after resuming execution" ) - result_dict["outcome"] = IJobResult.OUTCOME_SKIP - - elif resume_params.action == "rerun": + elif outcome == IJobResult.OUTCOME_CRASH: + if is_cert_blocker and not comments: + result_dict["comments"] = request_comment("why it failed") + else: + result_dict["comments"] = newline_join( + result_dict["comments"], "Crashed after resuming execution" + ) + elif outcome == IJobResult.OUTCOME_UNDECIDED: # if we don't call use_job_result it means we'll rerun the job return + else: + raise ValueError( + "Unsupported outcome for resume {}".format(outcome) + ) result = MemoryJobResult(result_dict) self.ctx.sa.use_job_result(last_job, result) @@ -1209,7 +1302,8 @@ def invoked(self, ctx): attrs["full_id"] = job_unit.id attrs["id"] = job_unit.partial_id attrs["certification_status"] = self.ctx.sa.get_job_state( - job).effective_certification_status + job + ).effective_certification_status jobs.append(attrs) if ctx.args.format == "?": all_keys = set() diff --git a/checkbox-ng/checkbox_ng/launcher/test_subcommands.py b/checkbox-ng/checkbox_ng/launcher/test_subcommands.py index 44a9088d9c..7c01e0e715 100644 --- a/checkbox-ng/checkbox_ng/launcher/test_subcommands.py +++ b/checkbox-ng/checkbox_ng/launcher/test_subcommands.py @@ -18,6 +18,7 @@ import datetime +from functools import partial from unittest import TestCase from unittest.mock import patch, Mock, MagicMock @@ -26,6 +27,7 @@ from checkbox_ng.launcher.subcommands import ( Launcher, ListBootstrapped, + IncompatibleJobError, ResumeInstead, IJobResult, request_comment, @@ 
-90,9 +92,7 @@ def test__manually_resume_session_delete(self, resume_menu_mock): # delete something, the check should see that the entries list is # empty and return false as there is nothing to maybe resume - self.assertFalse( - Launcher._manually_resume_session(self_mock, []) - ) + self.assertFalse(Launcher._manually_resume_session(self_mock, [])) @patch("checkbox_ng.launcher.subcommands.ResumeMenu") def test__manually_resume_session(self, resume_menu_mock): @@ -102,8 +102,7 @@ def test__manually_resume_session(self, resume_menu_mock): # the user has selected something from the list, we notice self.assertTrue(Launcher._manually_resume_session(self_mock, [])) # and we try to resume the session - self.assertTrue(self_mock._resume_session.called) - + self.assertTrue(self_mock._resume_session_via_resume_params.called) @patch("checkbox_ng.launcher.subcommands.ResumeMenu") def test__manually_resume_session_empty_id(self, resume_menu_mock): @@ -112,18 +111,22 @@ def test__manually_resume_session_empty_id(self, resume_menu_mock): self.assertFalse(Launcher._manually_resume_session(self_mock, [])) - @patch("checkbox_ng.launcher.subcommands.MemoryJobResult") @patch("checkbox_ng.launcher.subcommands.newline_join", new=MagicMock()) def test__resume_session_pass(self, memory_job_result_mock): self_mock = MagicMock() + self_mock._resume_session = partial( + Launcher._resume_session, self_mock + ) session_metadata_mock = self_mock.ctx.sa.resume_session.return_value session_metadata_mock.flags = ["testplanless"] resume_params_mock = MagicMock() resume_params_mock.action = "pass" - Launcher._resume_session(self_mock, resume_params_mock) + Launcher._resume_session_via_resume_params( + self_mock, resume_params_mock + ) args, _ = memory_job_result_mock.call_args_list[-1] result_dict, *_ = args @@ -136,6 +139,9 @@ def test__resume_session_fail_cert_blocker( self, request_comment_mock, memory_job_result_mock ): self_mock = MagicMock() + self_mock._resume_session = partial( + 
Launcher._resume_session, self_mock + ) self_mock.ctx.sa.get_job_state.return_value.effective_certification_status = ( "blocker" ) @@ -147,7 +153,9 @@ def test__resume_session_fail_cert_blocker( resume_params_mock.action = "fail" resume_params_mock.comments = None - Launcher._resume_session(self_mock, resume_params_mock) + Launcher._resume_session_via_resume_params( + self_mock, resume_params_mock + ) args, _ = memory_job_result_mock.call_args_list[-1] result_dict, *_ = args @@ -159,6 +167,9 @@ def test__resume_session_fail_cert_blocker( @patch("checkbox_ng.launcher.subcommands.newline_join", new=MagicMock()) def test__resume_session_fail_non_blocker(self, memory_job_result_mock): self_mock = MagicMock() + self_mock._resume_session = partial( + Launcher._resume_session, self_mock + ) self_mock.ctx.sa.get_job_state.return_value.effective_certification_status = ( "non-blocker" ) @@ -169,12 +180,57 @@ def test__resume_session_fail_non_blocker(self, memory_job_result_mock): resume_params_mock = MagicMock() resume_params_mock.action = "fail" - Launcher._resume_session(self_mock, resume_params_mock) + Launcher._resume_session_via_resume_params( + self_mock, resume_params_mock + ) args, _ = memory_job_result_mock.call_args_list[-1] result_dict, *_ = args self.assertEqual(result_dict["outcome"], IJobResult.OUTCOME_FAIL) + @patch("checkbox_ng.launcher.subcommands.MemoryJobResult") + @patch("checkbox_ng.launcher.subcommands.request_comment") + @patch("checkbox_ng.launcher.subcommands.newline_join", new=MagicMock()) + def test__resume_session_crash_cert_blocker( + self, request_comment_mock, memory_job_result_mock + ): + self_mock = MagicMock() + self_mock.ctx.sa.get_job_state.return_value.effective_certification_status = ( + "blocker" + ) + + session_metadata_mock = self_mock.ctx.sa.resume_session.return_value + session_metadata_mock.flags = ["testplanless"] + + Launcher._resume_session( + self_mock, "session_id", IJobResult.OUTCOME_CRASH, None + ) + + args, _ = 
memory_job_result_mock.call_args_list[-1] + result_dict, *_ = args + self.assertEqual(result_dict["outcome"], IJobResult.OUTCOME_CRASH) + # given that no comment was in resume_params, the resume procedure asks for it + self.assertTrue(request_comment_mock.called) + + @patch("checkbox_ng.launcher.subcommands.MemoryJobResult") + @patch("checkbox_ng.launcher.subcommands.newline_join", new=MagicMock()) + def test__resume_session_crash_non_blocker(self, memory_job_result_mock): + self_mock = MagicMock() + self_mock.ctx.sa.get_job_state.return_value.effective_certification_status = ( + "non-blocker" + ) + + session_metadata_mock = self_mock.ctx.sa.resume_session.return_value + session_metadata_mock.flags = ["testplanless"] + + Launcher._resume_session( + self_mock, "session_id", IJobResult.OUTCOME_CRASH, None + ) + + args, _ = memory_job_result_mock.call_args_list[-1] + result_dict, *_ = args + self.assertEqual(result_dict["outcome"], IJobResult.OUTCOME_CRASH) + @patch("checkbox_ng.launcher.subcommands.MemoryJobResult") @patch("checkbox_ng.launcher.subcommands.request_comment") @patch("checkbox_ng.launcher.subcommands.newline_join", new=MagicMock()) @@ -182,6 +238,9 @@ def test__resume_session_skip_blocker( self, request_comment_mock, memory_job_result_mock ): self_mock = MagicMock() + self_mock._resume_session = partial( + Launcher._resume_session, self_mock + ) self_mock.ctx.sa.get_job_state.return_value.effective_certification_status = ( "blocker" ) @@ -193,7 +252,9 @@ def test__resume_session_skip_blocker( resume_params_mock.action = "skip" resume_params_mock.comments = None - Launcher._resume_session(self_mock, resume_params_mock) + Launcher._resume_session_via_resume_params( + self_mock, resume_params_mock + ) args, _ = memory_job_result_mock.call_args_list[-1] result_dict, *_ = args @@ -205,6 +266,9 @@ def test__resume_session_skip_blocker( @patch("checkbox_ng.launcher.subcommands.newline_join", new=MagicMock()) def test__resume_session_skip_non_blocker(self, 
memory_job_result_mock): self_mock = MagicMock() + self_mock._resume_session = partial( + Launcher._resume_session, self_mock + ) self_mock.ctx.sa.get_job_state.return_value.effective_certification_status = ( "non-blocker" ) @@ -215,7 +279,9 @@ def test__resume_session_skip_non_blocker(self, memory_job_result_mock): resume_params_mock = MagicMock() resume_params_mock.action = "skip" - Launcher._resume_session(self_mock, resume_params_mock) + Launcher._resume_session_via_resume_params( + self_mock, resume_params_mock + ) args, _ = memory_job_result_mock.call_args_list[-1] result_dict, *_ = args @@ -225,6 +291,9 @@ def test__resume_session_skip_non_blocker(self, memory_job_result_mock): @patch("checkbox_ng.launcher.subcommands.newline_join", new=MagicMock()) def test__resume_session_rerun(self, memory_job_result_mock): self_mock = MagicMock() + self_mock._resume_session = partial( + Launcher._resume_session, self_mock + ) self_mock.ctx.sa.get_job_state.return_value.effective_certification_status = ( "non-blocker" ) @@ -235,11 +304,227 @@ def test__resume_session_rerun(self, memory_job_result_mock): resume_params_mock = MagicMock() resume_params_mock.action = "rerun" - Launcher._resume_session(self_mock, resume_params_mock) + Launcher._resume_session_via_resume_params( + self_mock, resume_params_mock + ) # we don't use job result of rerun jobs self.assertFalse(self_mock.ctx.sa.use_job_result.called) + @patch("checkbox_ng.launcher.subcommands.MemoryJobResult") + @patch("checkbox_ng.launcher.subcommands.newline_join", new=MagicMock()) + def test__resume_session_autocalculate_outcome( + self, memory_job_result_mock + ): + self_mock = MagicMock() + self_mock.ctx.sa.get_job_state.return_value.effective_certification_status = ( + "non-blocker" + ) + self_mock._get_autoresume_outcome_last_job.return_value = ( + IJobResult.OUTCOME_CRASH + ) + + session_metadata_mock = self_mock.ctx.sa.resume_session.return_value + session_metadata_mock.flags = [] + 
session_metadata_mock.app_blob = b'{"testplan_id" : "testplan_id"}' + + Launcher._resume_session(self_mock, "session_id", None, None) + + args, _ = memory_job_result_mock.call_args_list[-1] + result_dict, *_ = args + self.assertEqual(result_dict["outcome"], IJobResult.OUTCOME_CRASH) + + def test__get_autoresume_outcome_last_job_noreturn(self): + self_mock = MagicMock() + job_state = self_mock.sa.get_job_state() + job_state.job.flags = "noreturn" + metadata_mock = MagicMock() + metadata_mock.running_job_name = "running_metadata_job_name" + + outcome = Launcher._get_autoresume_outcome_last_job( + self_mock, metadata_mock + ) + + self.assertEqual(outcome, IJobResult.OUTCOME_PASS) + + def test__get_autoresume_outcome_last_job(self): + self_mock = MagicMock() + job_state = self_mock.sa.get_job_state() + job_state.job.flags = "" + metadata_mock = MagicMock() + metadata_mock.running_job_name = "running_metadata_job_name" + + outcome = Launcher._get_autoresume_outcome_last_job( + self_mock, metadata_mock + ) + + self.assertEqual(outcome, IJobResult.OUTCOME_CRASH) + + def test__resumed_session(self): + self_mock = MagicMock() + + with Launcher._resumed_session(self_mock, "session_id"): + self.assertTrue(self_mock.sa.resume_session.called) + self.assertFalse(self_mock.ctx.reset_sa.called) + self.assertTrue(self_mock.ctx.reset_sa.called) + + def test__should_autoresume_last_run_no_candidate(self): + self_mock = MagicMock() + + self.assertFalse(Launcher._should_autoresume_last_run(self_mock, [])) + + @patch("os.getenv", return_value="checkbox22") + @patch("checkbox_ng.launcher.subcommands.input") + @patch("checkbox_ng.launcher.subcommands._logger") + def test__should_autoresume_last_run_incompatible_session_snaps( + self, _logger_mock, input_mock, os_getenv_mock + ): + self_mock = MagicMock() + self_mock._resumed_session = partial( + Launcher._resumed_session, self_mock + ) + session_mock = MagicMock(id="session_id") + + self_mock.sa.resume_session.side_effect = 
IncompatibleJobError + + self.assertFalse( + Launcher._should_autoresume_last_run(self_mock, [session_mock]) + ) + # very important here that we print errors and stop because else the + # user is left wondering why the session didn't autoresume + self.assertTrue(_logger_mock.error.called) + self.assertTrue(input_mock.called) + + @patch("os.getenv", return_value=None) + @patch("checkbox_ng.launcher.subcommands.input") + @patch("checkbox_ng.launcher.subcommands._logger") + def test__should_autoresume_last_run_incompatible_session_debs( + self, _logger_mock, input_mock, os_getenv_mock + ): + self_mock = MagicMock() + self_mock._resumed_session = partial( + Launcher._resumed_session, self_mock + ) + session_mock = MagicMock(id="session_id") + + self_mock.sa.resume_session.side_effect = IncompatibleJobError + + self.assertFalse( + Launcher._should_autoresume_last_run(self_mock, [session_mock]) + ) + # very important here that we print errors and stop because else the + # user is left wondering why the session didn't autoresume + self.assertTrue(_logger_mock.error.called) + self.assertTrue(input_mock.called) + + def test__should_autoresume_last_run_no_testplan(self): + self_mock = MagicMock() + self_mock._resumed_session = partial( + Launcher._resumed_session, self_mock + ) + session_mock = MagicMock(id="session_id") + metadata_mock = MagicMock(app_blob=b"{}") + self_mock.sa.resume_session.return_value = metadata_mock + + self.assertFalse( + Launcher._should_autoresume_last_run(self_mock, [session_mock]) + ) + + def test__should_autoresume_last_run_no_running_job_name(self): + self_mock = MagicMock() + self_mock._resumed_session = partial( + Launcher._resumed_session, self_mock + ) + session_mock = MagicMock(id="session_id") + metadata_mock = MagicMock( + app_blob=b'{"testplan_id" : "testplan_id"}', running_job_name=None + ) + self_mock.sa.resume_session.return_value = metadata_mock + + self.assertFalse( + Launcher._should_autoresume_last_run(self_mock, [session_mock]) + 
) + + def test__should_autoresume_last_run_manual_job(self): + self_mock = MagicMock() + self_mock._resumed_session = partial( + Launcher._resumed_session, self_mock + ) + session_mock = MagicMock(id="session_id") + metadata_mock = MagicMock( + app_blob=b'{"testplan_id" : "testplan_id"}', + running_job_name="running_job_name", + ) + self_mock.sa.resume_session.return_value = metadata_mock + job_state_mock = self_mock.sa.get_job_state() + job_state_mock.job.plugin = "user-interact" + + self.assertFalse( + Launcher._should_autoresume_last_run(self_mock, [session_mock]) + ) + + def test__should_autoresume_last_run_yes(self): + self_mock = MagicMock() + self_mock._resumed_session = partial( + Launcher._resumed_session, self_mock + ) + session_mock = MagicMock(id="session_id") + metadata_mock = MagicMock( + app_blob=b'{"testplan_id" : "testplan_id"}', + running_job_name="running_job_name", + ) + self_mock.sa.resume_session.return_value = metadata_mock + job_state_mock = self_mock.sa.get_job_state() + job_state_mock.job.plugin = "shell" + + self.assertTrue( + Launcher._should_autoresume_last_run(self_mock, [session_mock]) + ) + + def test__auto_resume_session_from_ctx(self): + self_mock = MagicMock() + resume_candidate_mock = MagicMock(id="session_to_resume") + self_mock.ctx.args.session_id = "session_to_resume" + + self.assertTrue( + Launcher._auto_resume_session(self_mock, [resume_candidate_mock]) + ) + self.assertTrue(self_mock._resume_session.called) + + def test__auto_resume_session_from_ctx_unknown_session(self): + self_mock = MagicMock() + resume_candidate_mock = MagicMock(id="some_other_session") + self_mock.ctx.args.session_id = "session_to_resume" + + with self.assertRaises(RuntimeError): + self.assertTrue( + Launcher._auto_resume_session( + self_mock, [resume_candidate_mock] + ) + ) + + def test__auto_resume_session_autoresume(self): + self_mock = MagicMock() + resume_candidate_mock = MagicMock(id="session_to_resume") + self_mock.ctx.args.session_id = None + 
self_mock._should_autoresume_last_run.return_value = True + + self.assertTrue( + Launcher._auto_resume_session(self_mock, [resume_candidate_mock]) + ) + self.assertTrue(self_mock._resume_session.called) + + def test__auto_resume_session_no_autoresume(self): + self_mock = MagicMock() + resume_candidate_mock = MagicMock(id="session_to_resume") + self_mock.ctx.args.session_id = None + self_mock._should_autoresume_last_run.return_value = False + + self.assertFalse( + Launcher._auto_resume_session(self_mock, [resume_candidate_mock]) + ) + self.assertFalse(self_mock._resume_session.called) + @patch("checkbox_ng.launcher.subcommands.load_configs") @patch("checkbox_ng.launcher.subcommands.Colorizer", new=MagicMock()) def test_invoked_resume(self, load_config_mock): @@ -265,7 +550,9 @@ def setUp(self): self.launcher = Launcher() self.launcher._maybe_rerun_jobs = Mock(return_value=False) self.launcher._auto_resume_session = Mock(return_value=False) - self.launcher._resume_session = Mock(return_value=False) + self.launcher._resume_session_via_resume_params = Mock( + return_value=False + ) self.launcher._start_new_session = Mock() self.launcher._pick_jobs_to_run = Mock() self.launcher._export_results = Mock() @@ -301,6 +588,7 @@ def test_invoke_returns_1_on_many_diff_outcomes(self): self.ctx.sa.get_summary = Mock(return_value=mock_results) self.assertEqual(self.launcher.invoked(self.ctx), 1) + class TestLListBootstrapped(TestCase): def setUp(self): self.launcher = ListBootstrapped() @@ -308,12 +596,10 @@ def setUp(self): self.ctx.args = Mock(TEST_PLAN="", format="") self.ctx.sa = Mock( start_new_session=Mock(), - get_test_plans=Mock( - return_value=["test-plan1", "test-plan2"]), + get_test_plans=Mock(return_value=["test-plan1", "test-plan2"]), select_test_plan=Mock(), bootstrap=Mock(), - get_static_todo_list=Mock( - return_value=["test-job1", "test-job2"]), + get_static_todo_list=Mock(return_value=["test-job1", "test-job2"]), get_job=Mock( side_effect=[ Mock( @@ -322,10 
+608,10 @@ def setUp(self): "summary": "fake-job1", "plugin": "manual", "description": "fake-description1", - "certification_status": "unspecified" + "certification_status": "unspecified", }, id="namespace1::test-job1", - partial_id="test-job1" + partial_id="test-job1", ), Mock( _raw_data={ @@ -333,15 +619,16 @@ def setUp(self): "summary": "fake-job2", "plugin": "shell", "command": "ls", - "certification_status": "unspecified" + "certification_status": "unspecified", }, id="namespace2::test-job2", - partial_id="test-job2" + partial_id="test-job2", ), ] ), get_job_state=Mock( - return_value=Mock(effective_certification_status="blocker")), + return_value=Mock(effective_certification_status="blocker") + ), get_resumable_sessions=Mock(return_value=[]), get_dynamic_todo_list=Mock(return_value=[]), ) @@ -369,10 +656,7 @@ def test_invoke_print_output_standard_format(self, stdout): self.ctx.args.TEST_PLAN = "test-plan1" self.ctx.args.format = "{full_id}\n" - expected_out = ( - "namespace1::test-job1\n" - "namespace2::test-job2\n" - ) + expected_out = "namespace1::test-job1\n" "namespace2::test-job2\n" self.launcher.invoked(self.ctx) self.assertEqual(stdout.getvalue(), expected_out) @@ -397,6 +681,7 @@ def test_invoke_print_output_customized_format(self, stdout): self.launcher.invoked(self.ctx) self.assertEqual(stdout.getvalue(), expected_out) + class TestUtilsFunctions(TestCase): @patch("checkbox_ng.launcher.subcommands.Colorizer", new=MagicMock()) @patch("builtins.print") diff --git a/checkbox-ng/plainbox/impl/session/assistant.py b/checkbox-ng/plainbox/impl/session/assistant.py index 75e0c30932..9830b273fe 100644 --- a/checkbox-ng/plainbox/impl/session/assistant.py +++ b/checkbox-ng/plainbox/impl/session/assistant.py @@ -64,6 +64,7 @@ from plainbox.impl.session.restart import IRestartStrategy from plainbox.impl.session.restart import detect_restart_strategy from plainbox.impl.session.restart import RemoteDebRestartStrategy +from plainbox.impl.session.resume import 
IncompatibleJobError from plainbox.impl.session.storage import WellKnownDirsHelper from plainbox.impl.transport import OAuthTransport from plainbox.impl.transport import TransportError @@ -188,9 +189,11 @@ def __init__( self._job_start_time = None # Keep a record of jobs run during bootstrap phase self._bootstrap_done_list = [] + self._resume_candidates = {} self._load_providers() UsageExpectation.of(self).allowed_calls = { self.start_new_session: "create a new session from scratch", + self.resume_session: "resume a resume candidate", self.get_resumable_sessions: "get resume candidates", self.use_alternate_configuration: ( "use an alternate configuration system" @@ -524,7 +527,7 @@ def start_new_session( ), } - @raises(KeyError, UnexpectedMethodCall) + @raises(KeyError, UnexpectedMethodCall, IncompatibleJobError) def resume_session( self, session_id: str, runner_cls=UnifiedRunner, runner_kwargs=dict() ) -> "SessionMetaData": @@ -537,6 +540,8 @@ def resume_session( Resumed session metadata. :raises KeyError: If the session with a given session_id cannot be found. + :raises IncompatibleJobError: + If the session is incompatible due to a job changing :raises UnexpectedMethodCall: If the call is made at an unexpected time. Do not catch this error. It is a bug in your program. 
The error message will indicate what @@ -551,6 +556,13 @@ def resume_session( all_units = list( itertools.chain(*[p.unit_list for p in self._selected_providers]) ) + if session_id not in self._resume_candidates: + for resume_candidate in self.get_resumable_sessions(): + if resume_candidate.id == session_id: + break + else: + raise KeyError("Unknown session {}".format(session_id)) + self._manager = SessionManager.load_session( all_units, self._resume_candidates[session_id][0] ) @@ -616,6 +628,8 @@ def get_resumable_sessions(self) -> "Tuple[str, SessionMetaData]": """ UsageExpectation.of(self).enforce() # let's keep resume_candidates, so we don't have to load data again + # also, when this function is called invalidate the cache, as it may + # have been modified by some external source self._resume_candidates = {} for storage in WellKnownDirsHelper.get_storage_list(): data = storage.load_checkpoint() diff --git a/checkbox-ng/plainbox/impl/session/test_assistant.py b/checkbox-ng/plainbox/impl/session/test_assistant.py index 049ab8cf0a..a369955d97 100644 --- a/checkbox-ng/plainbox/impl/session/test_assistant.py +++ b/checkbox-ng/plainbox/impl/session/test_assistant.py @@ -158,3 +158,38 @@ def test_note_metadata_starting_job(self, _): ) self.assertTrue(self_mock._manager.checkpoint.called) + + @mock.patch("plainbox.impl.session.assistant.UsageExpectation") + def test_resume_session_autoload_session_not_found( + self, ue_mock, get_providers_mock + ): + self_mock = mock.MagicMock() + self_mock._resume_candidates = {} + self_mock.get_resumable_sessions.return_value = [] + + with self.assertRaises(KeyError): + SessionAssistant.resume_session(self_mock, "session_id") + + @mock.patch("plainbox.impl.session.assistant.SessionManager") + @mock.patch("plainbox.impl.session.assistant.JobRunnerUIDelegate") + @mock.patch("plainbox.impl.session.assistant._SilentUI") + @mock.patch("plainbox.impl.session.assistant.detect_restart_strategy") + 
@mock.patch("plainbox.impl.session.assistant.UsageExpectation") + def test_resume_session_autoload_session_found( + self, + ue_mock, + session_manager_mock, + jrd_mock, + _sui_mock, + detect_restart_strategy_mock, + get_providers_mock, + ): + self_mock = mock.MagicMock() + session_mock = mock.MagicMock(id="session_id") + + def get_resumable_sessions(): + self_mock._resume_candidates = {"session_id": session_mock} + + self_mock.get_resumable_sessions.return_value = [session_mock] + + _ = SessionAssistant.resume_session(self_mock, "session_id") diff --git a/metabox/metabox/metabox-provider/units/resume.pxu b/metabox/metabox/metabox-provider/units/resume.pxu index c2638b4ead..3cfb020691 100644 --- a/metabox/metabox/metabox-provider/units/resume.pxu +++ b/metabox/metabox/metabox-provider/units/resume.pxu @@ -3,7 +3,7 @@ _summary: Crash Checkbox flags: simple user: root command: - PID=`ps -o ppid= $$` + PID=`pgrep -f checkbox-cli` kill $PID id: reboot-emulator @@ -11,7 +11,7 @@ _summary: Emulate the reboot flags: simple noreturn user: root command: - PID=`ps -o ppid= $$` + PID=`pgrep -f checkbox-cli` kill $PID unit: test plan diff --git a/metabox/metabox/scenarios/restart/agent_respawn.py b/metabox/metabox/scenarios/restart/agent_respawn.py index 9a45e7db54..53c096813c 100644 --- a/metabox/metabox/scenarios/restart/agent_respawn.py +++ b/metabox/metabox/scenarios/restart/agent_respawn.py @@ -22,13 +22,14 @@ SelectTestPlan, Send, Expect, + Start ) from metabox.core.scenario import Scenario from metabox.core.utils import tag @tag("resume", "automatic") -class ResumeAfterCrashAuto(Scenario): +class AutoResumeAfterCrashAuto(Scenario): modes = ["remote"] launcher = textwrap.dedent( """ @@ -72,3 +73,32 @@ class ResumeAfterCrashManual(Scenario): Expect("job passed"), Expect("Emulate the reboot"), ] + + +@tag("resume", "automatic") +class AutoResumeAfterCrashAutoLocal(Scenario): + modes = ["local"] + launcher = textwrap.dedent( + """ + [launcher] + launcher_version = 1 + 
stock_reports = text + [test plan] + unit = 2021.com.canonical.certification::checkbox-crash-then-reboot + forced = yes + [test selection] + forced = yes + [ui] + type = silent + """ + ) + steps = [ + Start(), + Start(), + Start(), + AssertRetCode(1), + AssertPrinted("job crashed"), + AssertPrinted("Crash Checkbox"), + AssertPrinted("job passed"), + AssertPrinted("Emulate the reboot"), + ] From 638ec6c0f77aef51338b7820d6e92ff015854bbb Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Wed, 6 Mar 2024 14:34:21 +0100 Subject: [PATCH 063/108] Allow listing instances from resume state (bugfix) (#1040) Allow listing instances from resume state --- checkbox-ng/plainbox/impl/session/assistant.py | 1 + 1 file changed, 1 insertion(+) diff --git a/checkbox-ng/plainbox/impl/session/assistant.py b/checkbox-ng/plainbox/impl/session/assistant.py index 9830b273fe..f7ef380263 100644 --- a/checkbox-ng/plainbox/impl/session/assistant.py +++ b/checkbox-ng/plainbox/impl/session/assistant.py @@ -599,6 +599,7 @@ def resume_session( ).allowed_calls = self._get_allowed_calls_in_normal_state() else: UsageExpectation.of(self).allowed_calls = { + self.get_resumable_sessions: "to get resume candidates", self.select_test_plan: "to save test plan selection", self.use_alternate_configuration: ( "use an alternate configuration system" From 32c04d7a0ee0e15e6bab73daad38035a7585045f Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Wed, 6 Mar 2024 16:10:49 +0100 Subject: [PATCH 064/108] Support job flags being None (bugfix) (#1041) * Support job flags being None * Modify the test plan to have none-flags on Crash Checkbox * flags should be a set not a string --- checkbox-ng/checkbox_ng/launcher/subcommands.py | 2 +- metabox/metabox/metabox-provider/units/resume.pxu | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/checkbox-ng/checkbox_ng/launcher/subcommands.py b/checkbox-ng/checkbox_ng/launcher/subcommands.py index 1da266c14c..b137589a73 100644 --- 
a/checkbox-ng/checkbox_ng/launcher/subcommands.py +++ b/checkbox-ng/checkbox_ng/launcher/subcommands.py @@ -509,7 +509,7 @@ def _get_autoresume_outcome_last_job(self, metadata): else, if the job made Checkbox crash, it will be marked as crash """ job_state = self.sa.get_job_state(metadata.running_job_name) - if "noreturn" in job_state.job.flags: + if "noreturn" in (job_state.job.flags or set()): return IJobResult.OUTCOME_PASS return IJobResult.OUTCOME_CRASH diff --git a/metabox/metabox/metabox-provider/units/resume.pxu b/metabox/metabox/metabox-provider/units/resume.pxu index 3cfb020691..f311648191 100644 --- a/metabox/metabox/metabox-provider/units/resume.pxu +++ b/metabox/metabox/metabox-provider/units/resume.pxu @@ -1,12 +1,14 @@ id: checkbox-crasher +unit: job _summary: Crash Checkbox -flags: simple +plugin: shell user: root command: PID=`pgrep -f checkbox-cli` kill $PID id: reboot-emulator +unit: job _summary: Emulate the reboot flags: simple noreturn user: root From e4347c0e418111df78e791569bb5f9546db3d44b Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Wed, 6 Mar 2024 18:21:03 +0100 Subject: [PATCH 065/108] Remote resistant no launcher (Bugfix) (#1042) * Use default Configuration when controller doesn't provide any * Test none launcher in remote assistant --- checkbox-ng/plainbox/impl/session/remote_assistant.py | 9 ++++++--- .../plainbox/impl/session/test_remote_assistant.py | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/checkbox-ng/plainbox/impl/session/remote_assistant.py b/checkbox-ng/plainbox/impl/session/remote_assistant.py index 0ccbf9f604..903b3f1933 100644 --- a/checkbox-ng/plainbox/impl/session/remote_assistant.py +++ b/checkbox-ng/plainbox/impl/session/remote_assistant.py @@ -720,9 +720,12 @@ def resume_by_id(self, session_id=None, overwrite_result_dict={}): } meta = self.resume_session(session_id, runner_kwargs=runner_kwargs) app_blob = json.loads(meta.app_blob.decode("UTF-8")) - launcher_from_controller = 
Configuration.from_text( - app_blob["launcher"], "Remote launcher" - ) + if "launcher" in app_blob: + launcher_from_controller = Configuration.from_text( + app_blob["launcher"], "Remote launcher" + ) + else: + launcher_from_controller = Configuration() self._launcher.update_from_another( launcher_from_controller, "Remote launcher" ) diff --git a/checkbox-ng/plainbox/impl/session/test_remote_assistant.py b/checkbox-ng/plainbox/impl/session/test_remote_assistant.py index 61ca30dd62..7889ace3e7 100644 --- a/checkbox-ng/plainbox/impl/session/test_remote_assistant.py +++ b/checkbox-ng/plainbox/impl/session/test_remote_assistant.py @@ -231,7 +231,7 @@ def test_resume_by_id_with_result_no_file_noreturn( rsa._state = remote_assistant.Idle mock_meta = mock.Mock() - mock_meta.app_blob = b'{"launcher": "", "testplan_id": "tp_id"}' + mock_meta.app_blob = b'{"testplan_id": "tp_id"}' rsa.resume_session.return_value = mock_meta os_path_exists_mock = mock.Mock() From 990f6ff1555496417d57f61bfe4e57a53e743d78 Mon Sep 17 00:00:00 2001 From: Pierre Equoy Date: Thu, 7 Mar 2024 09:01:11 +0800 Subject: [PATCH 066/108] Ensure that jobs that have an "after suspend" counterpart are run before suspend (New) (#1037) Some jobs should be run before *and* after suspend. To take this into account, the `siblings` field was developed[1], along with the `also-after-suspend` and `also-after-suspend-manual` flags[2]. These would let Checkbox spawn a similar job with an added dependency on the suspend job (either the manual or the automated version of it). The problem is that the original job did not have any dependency to force it to be run *before* the suspend job. This was not an issue for test plans organized manually, using regular expressions, because you could have an include section that looks like: storage_.* com.canonical.certification::suspend/suspend_advanced_auto after-suspend-storage.* However, now that template ids can be added in test plans[3], this is a problem. 
This patch will make sure the jobs that need to run before suspend are added as dependencies of their related suspend job, so that regardless of the order in the test plan, they will be run in the proper order. Documentation is updated to mention guarantee of job running before suspend in Job Unit reference page. Fix: #1010 [1] https://checkbox.readthedocs.io/en/stable/reference/units/job.html#job-siblings-field [2] https://checkbox.readthedocs.io/en/stable/reference/units/job.html#also-after-suspend-flag [3] https://checkbox.readthedocs.io/en/latest/reference/units/test-plan.html --- checkbox-ng/plainbox/impl/ctrl.py | 113 +++++++++++++++++- checkbox-ng/plainbox/impl/depmgr.py | 10 +- checkbox-ng/plainbox/impl/session/state.py | 23 ++-- .../plainbox/impl/session/test_state.py | 18 +-- checkbox-ng/plainbox/impl/test_ctrl.py | 113 +++++++++++++++++- checkbox-ng/plainbox/suspend_consts.py | 29 +++++ docs/reference/units/job.rst | 32 +++-- 7 files changed, 298 insertions(+), 40 deletions(-) create mode 100644 checkbox-ng/plainbox/suspend_consts.py diff --git a/checkbox-ng/plainbox/impl/ctrl.py b/checkbox-ng/plainbox/impl/ctrl.py index 03aee471b2..b02dc7e7d9 100644 --- a/checkbox-ng/plainbox/impl/ctrl.py +++ b/checkbox-ng/plainbox/impl/ctrl.py @@ -7,7 +7,6 @@ # Checkbox is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, # as published by the Free Software Foundation. - # # Checkbox is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -49,6 +48,7 @@ class to select the best method to execute a command of a particular job. This import subprocess import sys import threading +from functools import partial from subprocess import check_output, CalledProcessError, STDOUT from plainbox.abc import IJobResult @@ -71,6 +71,7 @@ class to select the best method to execute a command of a particular job. 
This from plainbox.impl.unit.template import TemplateUnit from plainbox.impl.unit.unit import MissingParam from plainbox.impl.validation import Severity +from plainbox.suspend_consts import Suspend from plainbox.vendor import morris from plainbox.vendor import extcmd @@ -102,12 +103,14 @@ class CheckBoxSessionStateController(ISessionStateController): of resource definitions. """ - def get_dependency_set(self, job): + def get_dependency_set(self, job, job_list=None): """ Get the set of direct dependencies of a particular job. :param job: A IJobDefinition instance that is to be visited + :param job_list: + List of jobs to check dependencies from :returns: set of pairs (dep_type, job_id) @@ -125,15 +128,70 @@ def get_dependency_set(self, job): resource_deps = job.get_resource_dependencies() except ResourceProgramError: resource_deps = () + suspend_job_id_list = [ + Suspend.AUTO_JOB_ID, + Suspend.MANUAL_JOB_ID, + ] + if job.id in suspend_job_id_list: + suspend_deps = self._get_before_suspend_dependency_set( + job.id, job_list + ) + else: + suspend_deps = set() result = set( itertools.chain( zip(itertools.repeat(direct), direct_deps), zip(itertools.repeat(resource), resource_deps), zip(itertools.repeat(ordering), after_deps), + zip(itertools.repeat(ordering), suspend_deps), ) ) return result + def _get_before_suspend_dependency_set(self, suspend_job_id, job_list): + """ + Get the set of after dependencies of a suspend job. + + Jobs that have a ``also-after-suspend[-manual]`` flag should be run + before their associated suspend job. Similary, jobs that declare a + sibling with a dependency on a suspend job should be run before said + suspend job. This function finds these jobs and add them as a + dependency for their associated suspend job. + + :param suspend_job_id: + The id of a suspend job. One of the following is expected: + Suspend.AUTO_JOB_ID or Suspend.MANUAL_JOB_ID. + :param job_list: + List of jobs to search dependencies on. 
+ :returns: + A set of job ids that need to be run before the suspend job + """ + p_suspend_job_id = partial( + self._is_job_impacting_suspend, suspend_job_id + ) + suspend_deps_jobs = filter(p_suspend_job_id, job_list) + suspend_deps = set(job.id for job in suspend_deps_jobs) + return suspend_deps + + def _is_job_impacting_suspend(self, suspend_job_id, job): + """ + Check if the ``suspend_job_id`` job needs to be run after a given + ``job``. This is the case if the ``job`` has a "also after suspend" + flag, or if it defines a sibling that has a dependency on the suspend + job. + """ + expected_flag = { + Suspend.AUTO_JOB_ID: Suspend.AUTO_FLAG, + Suspend.MANUAL_JOB_ID: Suspend.MANUAL_FLAG, + }.get(suspend_job_id) + if job.flags and expected_flag in job.flags: + return True + if job.siblings: + for sibling_data in json.loads(job.tr_siblings()): + if suspend_job_id in sibling_data.get("depends", []): + return True + return False + def get_inhibitor_list(self, session_state, job): """ Get a list of readiness inhibitors that inhibit a particular job. @@ -236,8 +294,59 @@ def get_inhibitor_list(self, session_state, job): related_job=dep_job_state.job, ) inhibitors.append(inhibitor) + if job.id in [Suspend.AUTO_JOB_ID, Suspend.MANUAL_JOB_ID]: + for inhibitor in self._get_suspend_inhibitor_list( + session_state, job + ): + inhibitors.append(inhibitor) return inhibitors + def _get_suspend_inhibitor_list(self, session_state, suspend_job): + """ + Get a list of readiness inhibitors that inhibit a suspend job. + + Jobs that have a ``also-after-suspend[-manual]`` flag should be run + before their associated suspend job. Similary, jobs that declare a + sibling with a dependency on a suspend job should be run before said + suspend job. This function finds these jobs and add them as a + inhibitor for their associated suspend job. + + :param session_state: + A SessionState instance that is used to interrogate the + state of the session where it matters for a particular + job. 
Currently this is used to access resources and job + results. + :param suspend_job: + A suspend job. + :returns: + List of JobReadinessInhibitor + """ + suspend_inhibitors = [] + undesired_inhibitor = JobReadinessInhibitor( + cause=InhibitionCause.UNDESIRED + ) + # We are only interested in jobs that are actually going to run + run_list = [ + state.job + for state in session_state.job_state_map.values() + if undesired_inhibitor not in state.readiness_inhibitor_list + ] + p_suspend_job_id = partial( + self._is_job_impacting_suspend, suspend_job.id + ) + suspend_inhibitors_jobs = filter(p_suspend_job_id, run_list) + for job in suspend_inhibitors_jobs: + if ( + session_state.job_state_map[job.id].result.outcome + == IJobResult.OUTCOME_NONE + ): + inhibitor = JobReadinessInhibitor( + cause=InhibitionCause.PENDING_DEP, + related_job=job, + ) + suspend_inhibitors.append(inhibitor) + return suspend_inhibitors + def observe_result(self, session_state, job, result, fake_resources=False): """ Notice the specified test result and update readiness state. diff --git a/checkbox-ng/plainbox/impl/depmgr.py b/checkbox-ng/plainbox/impl/depmgr.py index f91bc2f5b5..d0e9c7c07c 100644 --- a/checkbox-ng/plainbox/impl/depmgr.py +++ b/checkbox-ng/plainbox/impl/depmgr.py @@ -340,12 +340,12 @@ def _solve(self, visit_list=None): if visit_list is None: visit_list = self._job_list for job in visit_list: - self._visit(job) + self._visit(job=job, visit_list=visit_list) logger.debug(_("Done solving")) # Return the solution return self._solution - def _visit(self, job, trail=None): + def _visit(self, job, visit_list, trail=None): """ Internal method of DependencySolver. 
@@ -367,7 +367,9 @@ def _visit(self, job, trail=None): # If the trail was not specified start a trail for this node if trail is None: trail = [job] - for dep_type, job_id in job.controller.get_dependency_set(job): + for dep_type, job_id in job.controller.get_dependency_set( + job, visit_list + ): # Dependency is just an id, we need to resolve it # to a job instance. This can fail (missing dependencies) # so let's guard against that. @@ -383,7 +385,7 @@ def _visit(self, job, trail=None): logger.debug(_("Visiting dependency: %r"), next_job) # Update the trail as we visit that node trail.append(next_job) - self._visit(next_job, trail) + self._visit(next_job, visit_list, trail) trail.pop() # We've visited (recursively) all dependencies of this node, # let's color it black and append it to the solution list. diff --git a/checkbox-ng/plainbox/impl/session/state.py b/checkbox-ng/plainbox/impl/session/state.py index ef10d7c4bc..46eb54fbc8 100644 --- a/checkbox-ng/plainbox/impl/session/state.py +++ b/checkbox-ng/plainbox/impl/session/state.py @@ -41,6 +41,7 @@ from plainbox.impl.unit.job import JobDefinition from plainbox.impl.unit.unit_with_id import UnitWithId from plainbox.impl.unit.testplan import TestPlanUnitSupport +from plainbox.suspend_consts import Suspend from plainbox.vendor import morris @@ -1091,24 +1092,21 @@ def _add_job_siblings_unit(self, new_job, recompute, via): field_offset_map=new_job.field_offset_map), recompute, via) - if 'also-after-suspend' in new_job.get_flag_set(): + if Suspend.AUTO_FLAG in new_job.get_flag_set(): data = { key: value for key, value in new_job._data.items() if not key.endswith('siblings') } - data['flags'] = data['flags'].replace('also-after-suspend', '') - data['flags'] = data['flags'].replace( - 'also-after-suspend-manual', '') + data['flags'] = data['flags'].replace(Suspend.AUTO_FLAG, '') + data['flags'] = data['flags'].replace(Suspend.MANUAL_FLAG, '') data['id'] = "after-suspend-{}".format(new_job.partial_id) data['_summary'] = 
"{} after suspend (S3)".format( new_job.summary) - provider_id = "com.canonical.certification" - suspend_test_id = "suspend/suspend_advanced_auto" if new_job.depends: data['depends'] += " {}".format(new_job.id) else: data['depends'] = "{}".format(new_job.id) - data['depends'] += " {}::{}".format(provider_id, suspend_test_id) + data['depends'] += " {}".format(Suspend.AUTO_JOB_ID) self._add_job_unit( JobDefinition( data, @@ -1119,24 +1117,21 @@ def _add_job_siblings_unit(self, new_job, recompute, via): field_offset_map=new_job.field_offset_map), recompute, via) - if 'also-after-suspend-manual' in new_job.get_flag_set(): + if Suspend.MANUAL_FLAG in new_job.get_flag_set(): data = { key: value for key, value in new_job._data.items() if not key.endswith('siblings') } - data['flags'] = data['flags'].replace('also-after-suspend', '') - data['flags'] = data['flags'].replace( - 'also-after-suspend-manual', '') + data['flags'] = data['flags'].replace(Suspend.AUTO_FLAG, '') + data['flags'] = data['flags'].replace(Suspend.MANUAL_FLAG, '') data['id'] = "after-suspend-manual-{}".format(new_job.partial_id) data['_summary'] = "{} after suspend (S3)".format( new_job.summary) - provider_id = "com.canonical.certification" - suspend_test_id = "suspend/suspend_advanced" if new_job.depends: data['depends'] += " {}".format(new_job.id) else: data['depends'] = "{}".format(new_job.id) - data['depends'] += " {}::{}".format(provider_id, suspend_test_id) + data['depends'] += " {}".format(Suspend.MANUAL_JOB_ID) self._add_job_unit( JobDefinition( data, diff --git a/checkbox-ng/plainbox/impl/session/test_state.py b/checkbox-ng/plainbox/impl/session/test_state.py index 2adafd9763..7f1dec63fe 100644 --- a/checkbox-ng/plainbox/impl/session/test_state.py +++ b/checkbox-ng/plainbox/impl/session/test_state.py @@ -43,8 +43,8 @@ from plainbox.impl.testing_utils import make_job from plainbox.impl.unit.job import JobDefinition from plainbox.impl.unit.category import CategoryUnit - from 
plainbox.impl.unit.unit_with_id import UnitWithId +from plainbox.suspend_consts import Suspend from plainbox.vendor import mock from plainbox.vendor.morris import SignalTestCase @@ -309,7 +309,7 @@ def test_add_sibling_unit(self): def test_also_after_suspend_flag(self): # Define a job - job = make_job("A", summary="foo", flags="also-after-suspend") + job = make_job("A", summary="foo", flags=Suspend.AUTO_FLAG) # Define an empty session session = SessionState([]) # Add the job to the session @@ -319,12 +319,13 @@ def test_also_after_suspend_flag(self): self.assertIn(job, session.job_list) self.assertEqual(session.job_list[1].id, "after-suspend-A") self.assertEqual(session.job_list[1].summary, "foo after suspend (S3)") + expected_depends = "A {}".format(Suspend.AUTO_JOB_ID) self.assertEqual( session.job_list[1].depends, - ("A com.canonical.certification::suspend/suspend_advanced_auto"), + (expected_depends), ) sibling = session.job_list[1] - self.assertNotIn("also-after-suspend", sibling.get_flag_set()) + self.assertNotIn(Suspend.AUTO_FLAG, sibling.get_flag_set()) # Both jobs got added to job state map self.assertIs(session.job_state_map[job.id].job, job) self.assertIs(session.job_state_map[sibling.id].job, sibling) @@ -346,7 +347,7 @@ def test_also_after_suspend_flag(self): def test_also_after_suspend_manual_flag(self): # Define a job - job = make_job("A", summary="foo", flags="also-after-suspend-manual") + job = make_job("A", summary="foo", flags=Suspend.MANUAL_FLAG) # Define an empty session session = SessionState([]) # Add the job to the session @@ -356,12 +357,13 @@ def test_also_after_suspend_manual_flag(self): self.assertIn(job, session.job_list) self.assertEqual(session.job_list[1].id, "after-suspend-manual-A") self.assertEqual(session.job_list[1].summary, "foo after suspend (S3)") + expected_depends = "A {}".format(Suspend.MANUAL_JOB_ID) self.assertEqual( session.job_list[1].depends, - "A com.canonical.certification::suspend/suspend_advanced", + 
expected_depends, ) sibling = session.job_list[1] - self.assertNotIn("also-after-suspend-manual", sibling.get_flag_set()) + self.assertNotIn(Suspend.MANUAL_FLAG, sibling.get_flag_set()) # Both jobs got added to job state map self.assertIs(session.job_state_map[job.id].job, job) self.assertIs(session.job_state_map[sibling.id].job, sibling) @@ -1215,4 +1217,4 @@ def test_on_job_removed__via_state(self): sig1 = self.assertSignalFired(self.ctx.on_unit_removed, self.job) sig2 = self.assertSignalFired(self.ctx.state.on_unit_removed, self.job) sig3 = self.assertSignalFired(self.ctx.state.on_job_removed, self.job) - self.assertSignalOrdering(sig1, sig2, sig3) \ No newline at end of file + self.assertSignalOrdering(sig1, sig2, sig3) diff --git a/checkbox-ng/plainbox/impl/test_ctrl.py b/checkbox-ng/plainbox/impl/test_ctrl.py index fa31d835d5..c032dd5e5d 100644 --- a/checkbox-ng/plainbox/impl/test_ctrl.py +++ b/checkbox-ng/plainbox/impl/test_ctrl.py @@ -7,7 +7,6 @@ # Checkbox is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, # as published by the Free Software Foundation. 
- # # Checkbox is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -26,12 +25,14 @@ from subprocess import CalledProcessError from unittest import TestCase +import json import os import shutil from plainbox.abc import IJobResult from plainbox.abc import IProvider1 from plainbox.abc import IProviderBackend1 +from plainbox.suspend_consts import Suspend from plainbox.impl.ctrl import CheckBoxSessionStateController from plainbox.impl.ctrl import SymLinkNest from plainbox.impl.ctrl import gen_rfc822_records_from_io_log @@ -102,6 +103,19 @@ def test_get_dependency_set(self): self.assertEqual( self.ctrl.get_dependency_set(job_f), {('direct', 'j6'), ('resource', 'j6')}) + # Job with an "also-after-suspend" flag, meaning this job should be + # set to run before the suspend job + job_g = JobDefinition({ + "id": "j7", + "flags": Suspend.AUTO_FLAG + }) + suspend_job = JobDefinition({ + "id": Suspend.AUTO_JOB_ID + }) + self.assertEqual( + self.ctrl.get_dependency_set(suspend_job, [job_g]), + {("ordering", "j7")} + ) def test_get_inhibitor_list_PENDING_RESOURCE(self): # verify that jobs that require a resource that hasn't been @@ -227,6 +241,30 @@ def test_get_inhibitor_list_FAILED_DEP(self): [JobReadinessInhibitor( InhibitionCause.FAILED_DEP, j2, None)]) + def test_get_inhibitor_list_NOT_FAILED_DEP(self): + # verify that jobs that depend on another job that ran but + # didn't result in OUTCOME_FAIL produce the NOT_FAILED_DEP + # inhibitor. 
+ j1 = JobDefinition({ + 'id': 'j1', + 'salvages': 'j2', + }) + j2 = JobDefinition({ + 'id': 'j2' + }) + session_state = mock.MagicMock(spec=SessionState) + session_state.job_state_map = { + 'j1': mock.Mock(spec_set=JobState), + 'j2': mock.Mock(spec_set=JobState), + } + jsm_j2 = session_state.job_state_map['j2'] + jsm_j2.job = j2 + jsm_j2.result.outcome = IJobResult.OUTCOME_NONE + self.assertEqual( + self.ctrl.get_inhibitor_list(session_state, j1), + [JobReadinessInhibitor( + InhibitionCause.NOT_FAILED_DEP, j2, None)]) + def test_get_inhibitor_list_good_dep(self): # verify that jobs that depend on another job that ran and has outcome # equal to OUTCOME_PASS don't have any inhibitors @@ -256,6 +294,79 @@ def test_get_inhibitor_list_good_dep(self): self.assertEqual( self.ctrl.get_inhibitor_list(session_state, j1), []) + def test_get_inhibitor_list__suspend_job(self): + j1 = JobDefinition({ + "id": "j1", + "flags": Suspend.AUTO_FLAG, + }) + j2 = JobDefinition({ + "id": "j2", + }) + suspend_job = JobDefinition({ + "id": Suspend.AUTO_JOB_ID + }) + session_state = mock.MagicMock(spec=SessionState) + session_state.job_state_map = { + "j1": mock.Mock(spec_set=JobState), + "j2": mock.Mock(spec_set=JobState), + Suspend.AUTO_JOB_ID: mock.Mock(spec_set=JobState), + } + jsm_j1 = session_state.job_state_map["j1"] + jsm_j1.job = j1 + jsm_j1.result.outcome = IJobResult.OUTCOME_NONE + jsm_j1.readiness_inhibitor_list = [] + jsm_j2 = session_state.job_state_map["j2"] + jsm_j2.job = j2 + jsm_j2.result.outcome = IJobResult.OUTCOME_NONE + jsm_j2.readiness_inhibitor_list = [] + jsm_suspend = session_state.job_state_map[Suspend.AUTO_JOB_ID] + jsm_suspend.job = suspend_job + jsm_suspend.result.outcome = IJobResult.OUTCOME_NONE + jsm_suspend.readiness_inhibitor_list = [] + self.assertEqual( + self.ctrl.get_inhibitor_list(session_state, suspend_job), + [JobReadinessInhibitor(InhibitionCause.PENDING_DEP, j1, None)]) + + def test_is_job_impacting_suspend__wrong_suspend_job(self): + job = 
JobDefinition({ + "id": "job", + }) + self.assertEqual( + self.ctrl._is_job_impacting_suspend("wrong-suspend-job-id", job), + False + ) + + def test_is_job_impacting_suspend__flag(self): + job = JobDefinition({ + "id": "job", + "flags": "also-after-suspend", + }) + self.assertEqual( + self.ctrl._is_job_impacting_suspend(Suspend.AUTO_JOB_ID, job), + True + ) + self.assertEqual( + self.ctrl._is_job_impacting_suspend(Suspend.MANUAL_JOB_ID, job), + False + ) + + def test_is_job_impacting_suspend__siblings(self): + job = JobDefinition({ + "id": "job", + "siblings": json.dumps([{ + "id": "sibling-j1", + "depends": Suspend.MANUAL_JOB_ID, + }]) + }) + self.assertEqual( + self.ctrl._is_job_impacting_suspend(Suspend.AUTO_JOB_ID, job), + False + ) + self.assertEqual( + self.ctrl._is_job_impacting_suspend(Suspend.MANUAL_JOB_ID, job), + True + ) + def test_observe_result__normal(self): job = mock.Mock(spec=JobDefinition) result = mock.Mock(spec=IJobResult) diff --git a/checkbox-ng/plainbox/suspend_consts.py b/checkbox-ng/plainbox/suspend_consts.py new file mode 100644 index 0000000000..0424da00fa --- /dev/null +++ b/checkbox-ng/plainbox/suspend_consts.py @@ -0,0 +1,29 @@ +# This file is part of Checkbox. +# +# Copyright 2024 Canonical Ltd. +# Written by: +# Pierre Equoy +# +# Checkbox is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, +# as published by the Free Software Foundation. +# +# Checkbox is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Checkbox. If not, see . 
+ +""" +:mod:`plainbox.impl.suspend_consts` -- Suspend-related constants +================================================================ +""" + + +class Suspend: + AUTO_JOB_ID = "com.canonical.certification::suspend/suspend_advanced_auto" + MANUAL_JOB_ID = "com.canonical.certification::suspend/suspend_advanced" + AUTO_FLAG = "also-after-suspend" + MANUAL_FLAG = "also-after-suspend-manual" diff --git a/docs/reference/units/job.rst b/docs/reference/units/job.rst index e576873502..f8a0e7fcb9 100644 --- a/docs/reference/units/job.rst +++ b/docs/reference/units/job.rst @@ -315,11 +315,17 @@ Following fields may be used by the job unit: .. _also-after-suspend flag: - ``also-after-suspend``: See :ref:`Job siblings field` below. + ``also-after-suspend``: + Ensure the test will be run before **and** after suspend by creating + a :ref:`sibling` that will depend on the automated + suspend job. The current job is guaranteed to run before suspend. .. _also-after-suspend-manual flag: - ``also-after-suspend-manual``: See :ref:`Job siblings field` below. + ``also-after-suspend-manual``: + Ensure the test will be run before **and** after suspend by creating + a :ref:`sibling` that will depend on the manual + suspend job. The current job is guaranteed to run before suspend. Additional flags may be present in job definition; they are ignored. @@ -399,16 +405,20 @@ Following fields may be used by the job unit: com.canonical.certification::suspend/suspend_advanced foo -.. warning:: - The curly braces used in this field have to be escaped when used in a - template job (python format, Jinja2 templates do not have this issue). - The syntax for templates is:: + .. note:: + If the sibling definition depends on one of the suspend jobs, Checkbox + will make sure the original job runs **before** the suspend job. - _siblings: [ - {{ "id": "bar-after-suspend_{interface}", - "_summary": "bar after suspend", - "depends": "suspend/advanced"}} - ] + .. 
warning:: + The curly braces used in this field have to be escaped when used in a + template job (python format, Jinja2 templates do not have this issue). + The syntax for templates is:: + + _siblings: [ + {{ "id": "bar-after-suspend_{interface}", + "_summary": "bar after suspend", + "depends": "suspend/advanced"}} + ] .. _Job imports field: From 6f5a021ee3c8384ef18b078c46d89fb38c059ff0 Mon Sep 17 00:00:00 2001 From: stanley31huang Date: Thu, 7 Mar 2024 13:28:44 +0800 Subject: [PATCH 067/108] [checkbox-ce-oem] fixed CAN bus and serial RS485 bug (Bugfix) (#1044) * Fixed the logic of validate CAN frame packets fixed the logic of validate CAN frame packets * fixed the command issue for rs485-remote test fixed the command issue for ce-oem-serial/rs458-remote test --- contrib/checkbox-provider-ce-oem/bin/socketcan_test.py | 4 ++-- contrib/checkbox-provider-ce-oem/units/serial/jobs.pxu | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/contrib/checkbox-provider-ce-oem/bin/socketcan_test.py b/contrib/checkbox-provider-ce-oem/bin/socketcan_test.py index abe28fa61e..c62c4e9a31 100755 --- a/contrib/checkbox-provider-ce-oem/bin/socketcan_test.py +++ b/contrib/checkbox-provider-ce-oem/bin/socketcan_test.py @@ -231,8 +231,8 @@ def stress_echo_test(interface, can_id, eff_flag, fd_mode, count=30): failed_count = 0 for index, data in enumerate(original_records): # validate data field in CAN packet only - if _validate_packet_data(can_socket, data, - recv_records[index]): + if not _validate_packet_data(can_socket, data, + recv_records[index]): failed_count += 1 if failed_count > 0: diff --git a/contrib/checkbox-provider-ce-oem/units/serial/jobs.pxu b/contrib/checkbox-provider-ce-oem/units/serial/jobs.pxu index 0cb1007b5d..0d45780fa5 100644 --- a/contrib/checkbox-provider-ce-oem/units/serial/jobs.pxu +++ b/contrib/checkbox-provider-ce-oem/units/serial/jobs.pxu @@ -1,7 +1,7 @@ id: ce-oem-serial/rs485-list _summary: Generates a RS485 resource based on user 
supplied configuration _description: - A RS485 resource that relies on the user specifying the number of RS485 port. + A RS485 resource that relies on the user specifying the number of RS485 port. This is to allow template jobs to then be instantiated. Usage of parameter: {port1} {port2} RS485_PORTS=/dev/ttymxc1 /dev/ttymxc2 @@ -18,7 +18,7 @@ template-resource: ce-oem-serial/rs485-list template-unit: job id: ce-oem-serial/rs485-remote-{RS485} _summary: To check the port {RS485} can working on RS485 half-duplex mode. -_purpose: +_purpose: To check the port {RS485} can send and receive the string with RS485 half-duplex mode. _description: @@ -29,8 +29,8 @@ user: root category_id: com.canonical.certification::serial estimated_duration: 30 flags: also-after-suspend -command: serial-remote.py {RS485} --mode client -requires: +command: serial_test.py {RS485} --mode client +requires: manifest.has_rs485_server == 'True' manifest.has_rs485 == 'True' -imports: from com.canonical.plainbox import manifest \ No newline at end of file +imports: from com.canonical.plainbox import manifest From d102d9301acfe0200bd592d9d4f3e4ede1681b4e Mon Sep 17 00:00:00 2001 From: Pierre Equoy Date: Fri, 8 Mar 2024 16:32:19 +0800 Subject: [PATCH 068/108] Add Genio provider to contrib area (Infra) (#1017) * Add: land Genio ODM Test Plan * Fix: update commands and add HDMI RX menifest * Move the Genio provider to the contrib area * Update Genio provider manage.py to match the contrib namespace * Import executable resource job where required Genio provider has been moved to the contrib namespace, and therefore cannot call the "executable" job from the resource provider anymore. The "executable" job needs to be imported before being used. Fix RTW-251 * Add contrib Genio provider GitHub Action * Amend tox install commands * Undo changes made for the Genio project Since Genio has not landed yet in the generic providers, the snapcraft recipe should not be modified. 
* Removing Genio test plans from generic providers Since Genio is currently in the contrib area, test plans for the certification-client provider are not required. * Prevents a Genio GitHub workflow from failing when a provider checks fail * Deactivate automated workflows of providers in the contrib area Keep workflows for ce-oem and genio providers in the contrib area, but don't run them automatically to prevent blocking landing. --------- Co-authored-by: baconyao --- .github/workflows/tox-contrib-genio.yaml | 40 + .../tox-contrib-provider-ce-oem.yaml | 9 +- contrib/genio/.coveragerc | 16 + contrib/genio/README.md | 8 + contrib/genio/bin/boot_partition.py | 196 ++++ contrib/genio/bin/brightness_test.py | 220 +++++ contrib/genio/bin/cpu_idle.py | 264 ++++++ contrib/genio/bin/dvfs_gpu_check_governors.py | 42 + contrib/genio/bin/gpio_loopback_test.py | 249 +++++ contrib/genio/bin/hdmirx_output_checker.sh | 93 ++ contrib/genio/bin/hdmirx_tool_runner.sh | 124 +++ contrib/genio/bin/linux_ccf.py | 82 ++ contrib/genio/bin/pmic_regulator.py | 88 ++ contrib/genio/bin/serialcheck.py | 64 ++ contrib/genio/bin/set_as_performance_mode.sh | 192 ++++ contrib/genio/bin/spidev_test.py | 70 ++ contrib/genio/bin/verify-mt8188-ccf.sh | 106 +++ contrib/genio/bin/verify-mt8195-ccf.sh | 79 ++ contrib/genio/data/linux-ccf/mt8188-clk.h | 734 +++++++++++++++ contrib/genio/data/linux-ccf/mt8195-clk.h | 864 ++++++++++++++++++ contrib/genio/data/spi/test.bin | 1 + .../launcher/genio-odm-certification-G1200 | 32 + .../launcher/genio-odm-certification-G350 | 35 + .../launcher/genio-odm-certification-G700 | 31 + contrib/genio/manage.py | 22 + contrib/genio/tox.ini | 85 ++ contrib/genio/units/audio/category.pxu | 3 + contrib/genio/units/audio/jobs.pxu | 246 +++++ contrib/genio/units/audio/manifest.pxu | 17 + contrib/genio/units/audio/test-plan.pxu | 54 ++ contrib/genio/units/boot/category.pxu | 3 + contrib/genio/units/boot/jobs.pxu | 30 + contrib/genio/units/boot/test-plan.pxu | 19 + 
contrib/genio/units/device/jobs.pxu | 14 + contrib/genio/units/display/category.pxu | 3 + contrib/genio/units/display/jobs.pxu | 70 ++ contrib/genio/units/display/manifest.pxu | 17 + contrib/genio/units/display/test-plan.pxu | 58 ++ contrib/genio/units/ebbr/category.pxu | 3 + contrib/genio/units/ebbr/jobs.pxu | 40 + contrib/genio/units/ebbr/test-plan.pxu | 43 + contrib/genio/units/graphic/category.pxu | 3 + contrib/genio/units/graphic/jobs.pxu | 124 +++ contrib/genio/units/graphic/test-plan.pxu | 53 ++ contrib/genio/units/hdmi-rx/category.pxu | 3 + contrib/genio/units/hdmi-rx/jobs.pxu | 255 ++++++ contrib/genio/units/hdmi-rx/manifest.pxu | 5 + contrib/genio/units/hdmi-rx/test-plan.pxu | 56 ++ contrib/genio/units/info/category.pxu | 3 + contrib/genio/units/info/jobs.pxu | 8 + contrib/genio/units/info/test-plan.pxu | 39 + contrib/genio/units/peripheral/category.pxu | 3 + contrib/genio/units/peripheral/jobs.pxu | 197 ++++ contrib/genio/units/peripheral/manifest.pxu | 41 + contrib/genio/units/peripheral/test-plan.pxu | 57 ++ .../genio/units/power-management/category.pxu | 3 + contrib/genio/units/power-management/jobs.pxu | 162 ++++ .../units/power-management/test-plan.pxu | 57 ++ contrib/genio/units/thermal/category.pxu | 3 + contrib/genio/units/thermal/jobs.pxu | 67 ++ contrib/genio/units/thermal/test-plan.pxu | 45 + 61 files changed, 5542 insertions(+), 8 deletions(-) create mode 100644 .github/workflows/tox-contrib-genio.yaml create mode 100644 contrib/genio/.coveragerc create mode 100644 contrib/genio/README.md create mode 100755 contrib/genio/bin/boot_partition.py create mode 100755 contrib/genio/bin/brightness_test.py create mode 100755 contrib/genio/bin/cpu_idle.py create mode 100755 contrib/genio/bin/dvfs_gpu_check_governors.py create mode 100755 contrib/genio/bin/gpio_loopback_test.py create mode 100755 contrib/genio/bin/hdmirx_output_checker.sh create mode 100755 contrib/genio/bin/hdmirx_tool_runner.sh create mode 100755 contrib/genio/bin/linux_ccf.py create 
mode 100755 contrib/genio/bin/pmic_regulator.py create mode 100755 contrib/genio/bin/serialcheck.py create mode 100755 contrib/genio/bin/set_as_performance_mode.sh create mode 100755 contrib/genio/bin/spidev_test.py create mode 100755 contrib/genio/bin/verify-mt8188-ccf.sh create mode 100755 contrib/genio/bin/verify-mt8195-ccf.sh create mode 100755 contrib/genio/data/linux-ccf/mt8188-clk.h create mode 100755 contrib/genio/data/linux-ccf/mt8195-clk.h create mode 100644 contrib/genio/data/spi/test.bin create mode 100644 contrib/genio/launcher/genio-odm-certification-G1200 create mode 100644 contrib/genio/launcher/genio-odm-certification-G350 create mode 100644 contrib/genio/launcher/genio-odm-certification-G700 create mode 100755 contrib/genio/manage.py create mode 100644 contrib/genio/tox.ini create mode 100644 contrib/genio/units/audio/category.pxu create mode 100644 contrib/genio/units/audio/jobs.pxu create mode 100644 contrib/genio/units/audio/manifest.pxu create mode 100644 contrib/genio/units/audio/test-plan.pxu create mode 100644 contrib/genio/units/boot/category.pxu create mode 100644 contrib/genio/units/boot/jobs.pxu create mode 100644 contrib/genio/units/boot/test-plan.pxu create mode 100644 contrib/genio/units/device/jobs.pxu create mode 100644 contrib/genio/units/display/category.pxu create mode 100644 contrib/genio/units/display/jobs.pxu create mode 100644 contrib/genio/units/display/manifest.pxu create mode 100644 contrib/genio/units/display/test-plan.pxu create mode 100644 contrib/genio/units/ebbr/category.pxu create mode 100644 contrib/genio/units/ebbr/jobs.pxu create mode 100644 contrib/genio/units/ebbr/test-plan.pxu create mode 100644 contrib/genio/units/graphic/category.pxu create mode 100644 contrib/genio/units/graphic/jobs.pxu create mode 100644 contrib/genio/units/graphic/test-plan.pxu create mode 100644 contrib/genio/units/hdmi-rx/category.pxu create mode 100644 contrib/genio/units/hdmi-rx/jobs.pxu create mode 100644 
contrib/genio/units/hdmi-rx/manifest.pxu create mode 100644 contrib/genio/units/hdmi-rx/test-plan.pxu create mode 100644 contrib/genio/units/info/category.pxu create mode 100644 contrib/genio/units/info/jobs.pxu create mode 100644 contrib/genio/units/info/test-plan.pxu create mode 100644 contrib/genio/units/peripheral/category.pxu create mode 100644 contrib/genio/units/peripheral/jobs.pxu create mode 100644 contrib/genio/units/peripheral/manifest.pxu create mode 100644 contrib/genio/units/peripheral/test-plan.pxu create mode 100644 contrib/genio/units/power-management/category.pxu create mode 100644 contrib/genio/units/power-management/jobs.pxu create mode 100644 contrib/genio/units/power-management/test-plan.pxu create mode 100644 contrib/genio/units/thermal/category.pxu create mode 100644 contrib/genio/units/thermal/jobs.pxu create mode 100644 contrib/genio/units/thermal/test-plan.pxu diff --git a/.github/workflows/tox-contrib-genio.yaml b/.github/workflows/tox-contrib-genio.yaml new file mode 100644 index 0000000000..dd37a62bd1 --- /dev/null +++ b/.github/workflows/tox-contrib-genio.yaml @@ -0,0 +1,40 @@ +name: Test provider-genio (from contrib area) with tox + +on: + workflow_dispatch: + +jobs: + tox_test_contrib_ce_oem_provider: + continue-on-error: true + name: Test Genio provider (from contrib area) with tox + defaults: + run: + working-directory: contrib/genio + runs-on: ubuntu-20.04 + strategy: + matrix: + python: ["3.5", "3.6", "3.8", "3.10"] + include: + - python: "3.5" + tox_env_name: "py35" + - python: "3.6" + tox_env_name: "py36" + - python: "3.8" + tox_env_name: "py38" + - python: "3.10" + tox_env_name: "py310" + steps: + - uses: actions/checkout@v3 + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python }} + - name: Install tox + run: pip install tox + - name: Run tox + run: tox -e${{ matrix.tox_env_name }} + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v3 + with: + token: ${{ 
secrets.CODECOV_TOKEN }} + flags: contrib-provider-genio diff --git a/.github/workflows/tox-contrib-provider-ce-oem.yaml b/.github/workflows/tox-contrib-provider-ce-oem.yaml index 673bd4540f..fe4fd81ffd 100644 --- a/.github/workflows/tox-contrib-provider-ce-oem.yaml +++ b/.github/workflows/tox-contrib-provider-ce-oem.yaml @@ -1,18 +1,11 @@ name: Test provider-ce-oem (from contrib area) with tox on: - push: - branches: [ main ] - paths: - - contrib/checkbox-provider-ce-oem/** - pull_request: - branches: [ main ] - paths: - - contrib/checkbox-provider-ce-oem/** workflow_dispatch: jobs: tox_test_contrib_ce_oem_provider: + continue-on-error: true name: Test ce-oem provider (from contrib area) with tox defaults: run: diff --git a/contrib/genio/.coveragerc b/contrib/genio/.coveragerc new file mode 100644 index 0000000000..07c0c792a6 --- /dev/null +++ b/contrib/genio/.coveragerc @@ -0,0 +1,16 @@ +[run] +branch = True +source = bin,manage +omit = + tests/* + +[report] +exclude_lines = + @abc.abstractmethod + @abc.abstractproperty + @abstractmethod + @abstractproperty + @public + pragma: no cover + raise NotImplementedError +show_missing = True diff --git a/contrib/genio/README.md b/contrib/genio/README.md new file mode 100644 index 0000000000..633ff65ac2 --- /dev/null +++ b/contrib/genio/README.md @@ -0,0 +1,8 @@ +Checkbox Provider - Genio +========================= + +A provider in which to land tests for the Genio partnership. + +That is it for now. 
You should check out the official documentation +for test authors at +https://checkbox.readthedocs.io/en/latest/index.html diff --git a/contrib/genio/bin/boot_partition.py b/contrib/genio/bin/boot_partition.py new file mode 100755 index 0000000000..88f73bf0a9 --- /dev/null +++ b/contrib/genio/bin/boot_partition.py @@ -0,0 +1,196 @@ +#!/usr/bin/env python3 +# This script should be run as a super user + +import subprocess +import pathlib +import json +from argparse import ArgumentParser + + +def runcmd(command): + ret = subprocess.run(command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + encoding="utf-8", + timeout=1) + return ret + + +class TestPartedBootDevice(): + + def __init__(self): + self.path = None + self.actual_result = None + self.expected_result = None + self.expected_result_UFS = { + "logical-sector-size": 4096, + "physical-sector-size": 4096, + "partitions": [ + { + "number": 1, + "name": "bootloaders" + }, { + "number": 2, + "name": "bootloaders_b" + }, { + "number": 3, + "name": "firmware" + }, { + "number": 4, + "name": "firmware_b" + }, { + "number": 5, + "name": "dramk" + }, { + "number": 6, + "name": "misc" + }, { + "number": 7, + "name": "bootassets" + }, { + "number": 8, + "name": "ubuntu-boot" + }, { + "number": 9, + "name": "writable" + } + ] + } + self.expected_result_EMMC = { + "logical-sector-size": 512, + "physical-sector-size": 512, + "partitions": [ + { + "number": 1, + "name": "bootloaders" + }, { + "number": 2, + "name": "bootloaders_b" + }, { + "number": 3, + "name": "firmware" + }, { + "number": 4, + "name": "firmware_b" + }, { + "number": 5, + "name": "dramk" + }, { + "number": 6, + "name": "misc" + }, { + "number": 7, + "name": "bootassets" + }, { + "number": 8, + "name": "ubuntu-boot" + }, { + "number": 9, + "name": "writable" + } + ] + } + + def check_is_block_device(self): + print("\nChecking if it is block device...") + if pathlib.Path(self.path).is_block_device(): + print("PASS: {} is a block 
device!".format(self.path)) + else: + raise SystemExit("FAIL: {} is not a block device!" + .format(self.path)) + + def check_disk(self): + print("\nChecking Parted...") + self.check_sector_size() + self.check_partitions() + + def get_disk_information(self): + print("\nGetting disk information in json") + ret = runcmd(["genio-test-tool.parted {} print -j".format(self.path)]) + self.actual_result = json.loads(ret.stdout)["disk"] + if self.path == "/dev/sdc": + self.expected_result = self.expected_result_UFS + elif self.path == "/dev/mmcblk0": + self.expected_result = self.expected_result_EMMC + else: + raise SystemExit("ERROR: Unrecognized device name!") + + def check_sector_size(self): + print("\nChecking Logical Sector Size...") + try: + if self.actual_result["logical-sector-size"] == \ + self.expected_result["logical-sector-size"]: + print("logical sector size: {}" + .format(self.actual_result["logical-sector-size"])) + print("PASS: Logical sector size is correct!") + else: + raise SystemExit("FAIL: Logical sector size is incorrect!") + except KeyError: + raise SystemExit("ERROR: logical-sector-size is not found") + print("\nChecking Physical Sector Size...") + try: + if self.actual_result["physical-sector-size"] == \ + self.expected_result["physical-sector-size"]: + print("physical sector size: {}" + .format(self.actual_result["physical-sector-size"])) + print("PASS: Physical sector size is correct!") + else: + raise SystemExit("FAIL: Physical sector size is incorrect!") + except KeyError: + raise SystemExit("ERROR: physical-sector-size is not found") + + def check_partitions(self): + print("\nChecking partitions...") + try: + actual_partitions = self.actual_result["partitions"] + expected_partitions = self.expected_result["partitions"] + if len(actual_partitions) != len(expected_partitions): + raise SystemExit("ERROR: Partitions count is incorrect!") + for actual_partition, expected_partition in \ + zip(actual_partitions, expected_partitions): + if 
actual_partition["number"] != expected_partition["number"]: + raise SystemExit("ERROR: Partition number is incorrect!") + if actual_partition["name"] != expected_partition["name"]: + raise SystemExit("ERROR: Partition name is incorrect") + except KeyError: + raise SystemExit("ERROR: Partitions not found!") + print("PASS: Paritions are correct!") + + def check_device(self, exit_when_check_fail): + ret = runcmd("lsblk") + if "sdc" in ret.stdout: + print("device: ufs") + print("path: /dev/sdc") + print() + elif "mmc" in ret.stdout: + print("device: emmc") + print("path: /dev/mmcblk0") + print() + elif exit_when_check_fail: + raise SystemExit("ERROR: Cannot find sdc or mmcblk0 in dev") + + def main(self): + parser = ArgumentParser(description="Check if the disk information\ + is correct") + parser.add_argument('--path', + help='the device path for checking') + parser.add_argument("--check_device_name", + help="To check the device name", + action="store_true") + parser.add_argument("--exit_when_check_fail", + help="Exit with error code when the device check \ + failed", + action="store_true") + args = parser.parse_args() + if args.check_device_name: + self.check_device(args.exit_when_check_fail) + return + self.path = args.path + self.check_is_block_device() + self.get_disk_information() + self.check_disk() + + +if __name__ == '__main__': + TestPartedBootDevice().main() diff --git a/contrib/genio/bin/brightness_test.py b/contrib/genio/bin/brightness_test.py new file mode 100755 index 0000000000..1883efebde --- /dev/null +++ b/contrib/genio/bin/brightness_test.py @@ -0,0 +1,220 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# +# brightness_test.py +# +# This file is part of Checkbox. +# +# Copyright 2012-2018 Canonical Ltd. +# +# Authors: +# Alberto Milone +# Sylvain Pineau +# +# Checkbox is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, +# as published by the Free Software Foundation. 
+ +# +# Checkbox is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Checkbox. If not, see . +# +# +# Patrick Chang modified this script for specific platform and display purpose. +# - I keep the origianl logic of Brightness Class and just modified the main +# function in order to get test the specific display. In addition, I divide +# the brightness into five parts to have better test coverage. +# +# + +import sys +import os +import time +import math + +from argparse import ArgumentParser, RawTextHelpFormatter +from glob import glob + + +class Brightness(object): + def __init__(self, path='/sys/class/backlight'): + self.sysfs_path = path + self.interfaces = self._get_interfaces_from_path() + + def read_value(self, path): + '''Read the value from a file''' + # See if the source is a file or a file object + # and act accordingly + file = path + if file is None: + lines_list = [] + else: + # It's a file + if not hasattr(file, 'write'): + myfile = open(file, 'r') + lines_list = myfile.readlines() + myfile.close() + # It's a file object + else: + lines_list = file.readlines() + + return int(''.join(lines_list).strip()) + + def write_value(self, value, path, test=None): + '''Write a value to a file''' + value = '%d' % value + # It's a file + if not hasattr(path, 'write'): + if test: + path = open(path, 'a') + else: + path = open(path, 'w') + path.write(value) + path.close() + # It's a file object + else: + path.write(value) + + def get_max_brightness(self, path): + full_path = os.path.join(path, 'max_brightness') + + return self.read_value(full_path) + + def get_actual_brightness(self, path): + full_path = os.path.join(path, 'actual_brightness') + + return self.read_value(full_path) + + def 
get_last_set_brightness(self, path): + full_path = os.path.join(path, 'brightness') + + return self.read_value(full_path) + + def _get_interfaces_from_path(self): + '''check all the files in a directory looking for quirks''' + interfaces = [] + if os.path.isdir(self.sysfs_path): + for d in glob(os.path.join(self.sysfs_path, '*')): + if os.path.isdir(d): + interfaces.append(d) + + return interfaces + + def was_brightness_applied(self, interface): + '''See if the selected brightness was applied + + Note: this doesn't guarantee that screen brightness + changed. + ''' + if ( + abs(self.get_actual_brightness(interface) - + self.get_last_set_brightness(interface)) > 1 + ): + return 1 + else: + return 0 + + +def main(): + parser = ArgumentParser(formatter_class=RawTextHelpFormatter) + parser.add_argument( + "-p", "--platform", + help="Genio device platform type.", + choices=["G1200-evk", "G700", "G350"] + ) + parser.add_argument( + "-d", "--display", + choices=["dsi", "edp", "lvds"], + help="The type of built-in display" + ) + + args = parser.parse_args() + + tables = { + "G1200-evk": { + "dsi": "backlight-lcd0", + "edp": "backlight-lcd1", + "lvds": "backlight-lcd1", + }, + "G700": { + "dsi": "1c008000.dsi0.0", + "edp": "backlight-lcd0", + }, + "G350": { + "dsi": "14014000.dsi0.0", + }, + } + + # Make sure that we have root privileges + if os.geteuid() != 0: + print('Error: please run this program as root', + file=sys.stderr) + exit(1) + + print(f"Test the brightness of '{args.display}' display") + + target_interface = '' + try: + target_interface = tables[args.platform][args.display] + print(f"Interface: {target_interface}\n") + except KeyError: + raise SystemExit( + f"ERROR: no suitable interface of {args.display} display") + + brightness = Brightness() + + # If no backlight interface can be found + if len(brightness.interfaces) == 0: + raise SystemExit("ERROR: no brightness interfaces found") + + exit_status = 0 + find_target_display = False + print(f'Available 
Interfaces: {brightness.interfaces}') + for interface in brightness.interfaces: + if target_interface in interface: + find_target_display = True + # Get the current brightness which we can restore later + original_brightness = brightness.get_actual_brightness(interface) + print(f'Current brightness: {original_brightness}') + + # Get the maximum value for brightness + max_brightness = brightness.get_max_brightness(interface) + print(f'Maximum brightness: {max_brightness}\n') + + for m in [0, 0.25, 0.5, 0.75, 1]: + # Set the brightness to half the max value + current_brightness = math.ceil(max_brightness * m) + print(f'Set the brightness as {current_brightness}') + brightness.write_value( + current_brightness, + os.path.join(interface, 'brightness')) + + # Check that "actual_brightness" reports the same value we + # set "brightness" to + exit_status += brightness.was_brightness_applied(interface) + + # Wait a little bit before going back to the original value + time.sleep(2) + + # Set the brightness back to its original value + brightness.write_value( + original_brightness, + os.path.join(interface, 'brightness')) + print( + f'Set brightness back to original value: {original_brightness}' + ) + # Close the loop since the target display has been tested + break + + if not find_target_display: + raise SystemExit(f"ERROR: no {target_interface} interface be found") + raise SystemExit(exit_status) + + +if __name__ == '__main__': + main() diff --git a/contrib/genio/bin/cpu_idle.py b/contrib/genio/bin/cpu_idle.py new file mode 100755 index 0000000000..529709e6dc --- /dev/null +++ b/contrib/genio/bin/cpu_idle.py @@ -0,0 +1,264 @@ +#!/usr/bin/env python3 + +import os +import argparse + + +GENERAL_PATH = 'cpu%d/cpuidle/state%d/%s' + + +def read_attr(attr): + path = os.path.join('/sys/devices/system/cpu', attr) + if not os.path.exists(path): + return '' + with open(path) as f: + tmp = f.read().strip() + return tmp + + +def read_attr_num(attr): + tmp = read_attr(attr) + if not 
tmp: + return -1 + + return int(tmp) + + +def read_idle_attr(cpu, state, attr): + return read_attr(GENERAL_PATH % (cpu, state, attr)) + + +def read_idle_attr_num(cpu, state, attr): + return read_attr_num(GENERAL_PATH % (cpu, state, attr)) + + +def error_handler(node_type, node_path, expect, reality): + if node_type == 'name' or node_type == 'disable': + print( + ( + f"Failed: " + f"the expected {node_type} value of node '{node_path}'" + f"should be '{expect}' but got '{reality}'" + ) + ) + if node_type == 'usage': + print( + ( + f"Failed: " + f"the expected usage value of node '{node_path}'" + f"should grater than 0" + ) + ) + + +def output_checker(cpu, state, name, disable, usage): + ''' + @param:name, type: tuple. (reality value, expected value) + @param:disable, type: tuple. (reality value, expected value) + @param:usage + ''' + fail = 0 + print(f'CPU node: cpu/{cpu}/cpuidle/state{state}') + print(f'Got name: {name[0]}, disable: {disable[0]}, usage: {usage}') + if name[0] != name[1]: + node_path = GENERAL_PATH.format(cpu, state, 'name') + error_handler('name', node_path, name[0], name[1]) + fail = 1 + if disable[0] != disable[1]: + node_path = GENERAL_PATH.format(cpu, state, 'disable') + error_handler('disable', node_path, disable[0], disable[1]) + fail = 1 + if usage <= 0: + node_path = GENERAL_PATH.format(cpu, state, 'usage') + error_handler('usage', node_path) + fail = 1 + if fail: + exit(1) + + +def test_wfi(): + cpu = 0 + state = 0 + name = read_idle_attr(cpu, state, 'name') + disable = read_idle_attr(cpu, state, 'disable') + usage = read_idle_attr_num(cpu, state, 'usage') + output_checker( + cpu, + state, + name=(name, 'WFI'), + disable=(disable, '0'), + usage=usage + ) + + +def test_mcdi_cpu(soc): + if soc != 'mt8365': + print(f"Isn't supported for '{soc}'") + return + + cpu = 0 + state = 1 + name = read_idle_attr(cpu, state, 'name') + disable = read_idle_attr(cpu, state, 'disable') + usage = read_idle_attr_num(cpu, state, 'usage') + output_checker( + 
cpu, + state, + name=(name, 'mcdi-cpu'), + disable=(disable, '0'), + usage=usage + ) + + +def test_mcdi_cluster(soc): + if soc != 'mt8365': + print(f"Isn't supported for '{soc}'") + return + + cpu = 0 + state = 2 + name = read_idle_attr(cpu, state, 'name') + disable = read_idle_attr(cpu, state, 'disable') + usage = read_idle_attr_num(cpu, state, 'usage') + output_checker( + cpu, + state, + name=(name, 'mcdi-cluster'), + disable=(disable, '0'), + usage=usage + ) + + +def test_dpidle(soc): + if soc != 'mt8365': + print(f"Isn't supported for '{soc}'") + return + + cpu = 0 + state = 3 + name = read_idle_attr(cpu, state, 'name') + disable = read_idle_attr(cpu, state, 'disable') + usage = read_idle_attr_num(cpu, state, 'usage') + output_checker( + cpu, + state, + name=(name, 'dpidle'), + disable=(disable, '0'), + usage=usage + ) + + +def test_clusteroff_l(soc): + if soc == 'mt8365': + print(f"Isn't supported for '{soc}'") + return + + cpu = 0 + state = 2 + name = read_idle_attr(cpu, state, 'name') + disable = read_idle_attr(cpu, state, 'disable') + usage = read_idle_attr_num(cpu, state, 'usage') + output_checker( + cpu, + state, + name=(name, 'clusteroff-l' if soc == 'mt8390' else 'clusteroff_l'), + disable=(disable, '0'), + usage=usage + ) + + +def test_clusteroff_b(soc): + if soc == 'mt8365': + print(f"Isn't supported for '{soc}'") + return + + cpu = 6 if soc == 'mt8390' else 4 + state = 2 + name = read_idle_attr(cpu, state, 'name') + disable = read_idle_attr(cpu, state, 'disable') + usage = read_idle_attr_num(cpu, state, 'usage') + output_checker( + cpu, + state, + name=(name, 'clusteroff-b' if soc == 'mt8390' else 'clusteroff_b'), + disable=(disable, '0'), + usage=usage + ) + + +def test_cpuoff_l(soc): + if soc == 'mt8365': + print(f"Isn't supported for '{soc}'") + return + + cpu = 0 + state = 1 + name = read_idle_attr(cpu, state, 'name') + disable = read_idle_attr(cpu, state, 'disable') + usage = read_idle_attr_num(cpu, state, 'usage') + output_checker( + cpu, + 
state, + name=(name, 'cpuoff-l' if soc == 'mt8390' else 'cpuoff_l'), + disable=(disable, '0'), + usage=usage + ) + + +def test_cpuoff_b(soc): + if soc == 'mt8365': + print(f"Isn't supported for '{soc}'") + return + + cpu = 6 if soc == 'mt8390' else 4 + state = 1 + name = read_idle_attr(cpu, state, 'name') + disable = read_idle_attr(cpu, state, 'disable') + usage = read_idle_attr_num(cpu, state, 'usage') + output_checker( + cpu, + state, + name=(name, 'cpuoff-b' if soc == 'mt8390' else 'cpuoff_b'), + disable=(disable, '0'), + usage=usage + ) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + 'soc', + help='SoC type. e.g mt8395', + choices=['mt8395', 'mt8390', 'mt8365'] + ) + parser.add_argument( + '-c', '--case', + help='The available cases of CPU Idle', + choices=[ + 'wfi', 'mcdi-cpu', 'mcdi-cluster', 'dpidle', 'clusteroff-l', + 'clusteroff-b', 'cpuoff-l', 'cpuoff-b' + ], + type=str, + required=True + ) + args = parser.parse_args() + if args.case == 'wfi': + test_wfi() + if args.case == 'mcdi-cpu': + test_mcdi_cpu(args.soc) + if args.case == 'mcdi-cluster': + test_mcdi_cluster(args.soc) + if args.case == 'dpidle': + test_dpidle(args.soc) + if args.case == 'clusteroff-l': + test_clusteroff_l(args.soc) + if args.case == 'clusteroff-b': + test_clusteroff_b(args.soc) + if args.case == 'cpuoff-l': + test_cpuoff_l(args.soc) + if args.case == 'cpuoff-b': + test_cpuoff_b(args.soc) + + +if __name__ == '__main__': + main() diff --git a/contrib/genio/bin/dvfs_gpu_check_governors.py b/contrib/genio/bin/dvfs_gpu_check_governors.py new file mode 100755 index 0000000000..4e6b5aa932 --- /dev/null +++ b/contrib/genio/bin/dvfs_gpu_check_governors.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 + +import argparse + +GOVERNORS = ['userspace', 'powersave', 'performance', 'simple_ondemand'] +print(f'Expected Governos: {GOVERNORS}') + + +def test_sysfs_attrs_read(soc): + fail = 0 + mail_type = '13000000.mali' + if soc == 'mt8365': + mail_type = '13040000.mali' + 
node_path = ( + f'/sys/devices/platform/soc/{mail_type}/devfreq/{mail_type}/' + f'available_governors' + ) + + with open(node_path) as f: + for node in f.read().strip().split(): + if node not in GOVERNORS: + fail = 1 + print(f"Failed: found governor '{node}' out of expextation") + return fail + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + 'soc', + help='SoC type. e.g mt8395', + choices=['mt8395', 'mt8390', 'mt8365'] + ) + args = parser.parse_args() + ret = test_sysfs_attrs_read(args.soc) + if ret: + exit(1) + print('Pass') + + +if __name__ == '__main__': + main() diff --git a/contrib/genio/bin/gpio_loopback_test.py b/contrib/genio/bin/gpio_loopback_test.py new file mode 100755 index 0000000000..2f042a2e1a --- /dev/null +++ b/contrib/genio/bin/gpio_loopback_test.py @@ -0,0 +1,249 @@ +#!/bin/python3 +import time +import os +from argparse import ArgumentParser, RawTextHelpFormatter + + +class GPIOSysFsController(): + + TEST_STATES = (0, 1) + ROOT_PATH = "/sys/class/gpio" + + def __init__(self): + pass + + def get_gpio_base_number(self): + """ Get the base number of GPIO chip + """ + print("Get GPIO Chips info") + with open("/sys/kernel/debug/gpio", "r") as fp: + value = fp.read().strip() + print(value) + gpiochips = [i for i in value.split('\n') if 'gpiochip' in i] + gpiochip_base_number_dict = {} + for gc in gpiochips: + gc_splits = gc.split(' ') + gpiochip = gc_splits[0].replace(':', '') + base_number = gc_splits[2].split('-')[0] + gpiochip_base_number_dict.update({gpiochip: base_number}) + + print('\n\nGPIO chip base number mapping:') + print(gpiochip_base_number_dict) + return gpiochip_base_number_dict + + def run_test( + self, + output_gpio_chip_number, + input_gpio_chip_number, + physical_output_port, + physical_input_port, + gpio_output_pin, + gpio_input_pin + ): + """Launch GPIO test + + Args: + output_gpio_chip_number (str): the number of output gpio chip + e.g. 
0 + input_gpio_chip_number (str): the number of input gpio chip + e.g. 3 + physical_output_port (str): the name or physical port number for + output, it's used to provide a human readable content only + e.g. J3, pin26 + physical_input_port (str): the name or physical port number for + input, it's used to provide a human readable content only + e.g. J7, pin27 + gpio_output_pin (str): the gpio pin number of output. This value + means the real pin number of target GPIO. You can get this + information from Schematic or User Guide of the DUT + gpio_input_pin (str): the gpio pin number of intput. This value + means the real pin number of target GPIO. You can get this + information from Schematic or User Guide of the DUT + + Raises: + SystemExit: exit with the test result + """ + base_number_mapping = self.get_gpio_base_number() + output_base_number = int( + base_number_mapping['gpiochip{}'.format(output_gpio_chip_number)]) + input_base_number = int( + base_number_mapping['gpiochip{}'.format(input_gpio_chip_number)]) + output_pin_number = output_base_number + int(gpio_output_pin) + input_pin_number = input_base_number + int(gpio_input_pin) + print("\nOutput Base Number: {}".format(output_base_number)) + print("Input Base Number: {}".format(input_base_number)) + print("Physical output port: {}, GPIO number: {}".format( + physical_output_port, gpio_output_pin)) + print("Physical input port: {}, GPIO number {}".format( + physical_input_port, gpio_input_pin)) + print("Output Pin Number: {} + Base Number = {}".format( + gpio_output_pin, output_pin_number)) + print("Input Pin Number: {} + Base Number = {}".format( + gpio_input_pin, input_pin_number)) + print("\n# Start GPIO loopback test") + raise SystemExit( + not self.loopback_test(output_pin_number, input_pin_number)) + + def check_gpio_node(self, port): + """Check the GPIO port is exists + + Args: + port (str): the gpio port + """ + return os.path.exists("{}/gpio{}".format(self.ROOT_PATH, port)) + + def set_gpio(self, port, 
value): + """Write the value to GPIO port + + Args: + port (str): the gpio port + value (str): 0 or 1 + """ + print("# Set GPIO {} value to {}".format(port, value)) + with open("{}/gpio{}/value".format(self.ROOT_PATH, port), "wt") as fp: + fp.write("{}\n".format(value)) + + def read_gpio(self, port): + """Read the value from GPIO port + + Args: + port (str): the gpio port + + Returns: + value (str): the value of gpio port + """ + with open("{}/gpio{}/value".format(self.ROOT_PATH, port), "r") as fp: + value = fp.read().strip() + print("# Read GPIO {}, value is {}".format(port, value)) + return value + + def set_direction(self, port, value): + """Set direction for GPIO port + + Args: + port (str): the gpio port + direction (str): the direction of gpio port + """ + print("# Set GPIO {} direction to {}".format(port, value)) + with open( + "{}/gpio{}/direction".format(self.ROOT_PATH, port), + "w" + ) as fp: + fp.write("{}\n".format(value)) + + def configure_gpio(self, port, direction): + """Initial and configure GPIO port + + Args: + port (str): the gpio port + direction (str): the direction of gpio port + + Raises: + IOError: raise error if any issue + """ + try: + # Export GPIO + if not self.check_gpio_node(port): + with open("{}/export".format(self.ROOT_PATH), "w") as fexport: + fexport.write("{}\n".format(port)) + + if not self.check_gpio_node(port): + raise SystemExit("Failed to export GPIO {}\n".format(port)) + + # Set direction + self.set_direction(port, direction) + except Exception as err: + raise IOError( + "{} \nError: Failed to configure GPIO {} to {}".format + (err, port, direction)) + + def loopback_test(self, out_port, in_port): + """Launch GPIO loopback test + + Args: + out_port (str): the gpio port of output + in_port (str): the gpio port of input + + Returns: + result (bool): the test result + """ + result = True + self.configure_gpio(out_port, "out") + self.configure_gpio(in_port, "in") + + for state in self.TEST_STATES: + print("Try to send and 
receive {}".format(state))
+            value = self.read_gpio(in_port)
+            print("The initial input GPIO {}'s value is {}".format(
+                in_port, value))
+
+            self.set_gpio(out_port, state)
+            time.sleep(1)
+            real_state = self.read_gpio(in_port)
+
+            if int(real_state) != state:
+                str_match = "mismatch"
+                result = False
+            else:
+                str_match = "match"
+            print("# Digital state {}. expected: {} real: {}\n".format(
+                str_match, state, real_state)
+            )
+        return result
+
+
+def main():
+    parser = ArgumentParser(formatter_class=RawTextHelpFormatter)
+    parser.add_argument(
+        "-oc", "--output_gpio_chip_number",
+        help="Provide the target gpio chip number for output.",
+        default=0
+    )
+    parser.add_argument(
+        "-ic", "--input_gpio_chip_number",
+        help="Provide the target gpio chip number for input.",
+        default=0
+    )
+    parser.add_argument(
+        "-po", "--physical_output_port",
+        help=(
+            "Provide the physical output port number/name."
+            " It's used to provide a human readable content only"
+        )
+    )
+    parser.add_argument(
+        "-pi", "--physical_input_port",
+        help=(
+            "Provide the physical input port number/name."
+            " It's used to provide a human readable content only"
+        )
+    )
+    parser.add_argument(
+        "-go", "--gpio_output_pin",
+        help=(
+            "Provide the output gpio pin number. You can get this information"
+            " from Schematic or User Guide of the DUT"
+        )
+    )
+    parser.add_argument(
+        "-gi", "--gpio_input_pin",
+        help=(
+            "Provide the input gpio pin number. 
You can get this information" + " from Schematic or User Guide of the DUT" + ) + ) + args = parser.parse_args() + + obj = GPIOSysFsController() + obj.run_test( + args.output_gpio_chip_number, + args.input_gpio_chip_number, + args.physical_output_port, + args.physical_input_port, + args.gpio_output_pin, + args.gpio_input_pin + ) + + +if __name__ == "__main__": + main() diff --git a/contrib/genio/bin/hdmirx_output_checker.sh b/contrib/genio/bin/hdmirx_output_checker.sh new file mode 100755 index 0000000000..bbbe6c6575 --- /dev/null +++ b/contrib/genio/bin/hdmirx_output_checker.sh @@ -0,0 +1,93 @@ +#!/bin/bash + +# This script is used to verify the output of the mtk_hdmirx_tool. + +verify_check_cable_output() { + # $1 is the log file who records the output of hdmi_rx tool + # $2 is the expected connection status. Such as "hdmi connected" or "hdmi disconnected" + echo "Checking the status of HDMI connection ..." + if ! grep -qw "$2" "$1" ; then + echo " - FAIL: expect the status of HDMI connection to be '$2'" + exit 1 + fi + echo " - PASS: the status of HDMI connection is '$2'" +} + +verify_check_video_locked_output() { + # $1 is the log file who records the output of hdmi_rx tool + # $2 is the expected lock status of video. Such as "video locked" or "video unlocked" + if ! grep -qw "$2" "$1" ; then + echo " - FAIL: expect the status of Video Lock to be '$2'" + exit 1 + fi + echo " - PASS: the status of Video Lock is '$2'" +} + +verify_get_video_info_output() { + # $1 is the log file who records the output of hdmi_rx tool + # $2 is a string contains all expected values of v.hactive, v.vactive and v.frame_rate + + EXIT=0 + candidate_attributes=( "v.hactive" "v.vactive" "v.frame_rate" ) + IFS=' ' read -r -a expected_values <<< "$2" + for index in "${!candidate_attributes[@]}"; + do + echo "Checking the value of ${candidate_attributes[$index]} should be ${expected_values[$index]} ..." + if ! 
grep -qw "${candidate_attributes[$index]} = ${expected_values[$index]}" "$1" ; then + echo " - FAIL" + EXIT=1 + else + echo " - PASS" + fi + done + exit $EXIT +} + +verify_get_audio_info_output() { + # $1 is the log file who records the output of hdmi_rx tool + # $2 is a string contains all expected values of "Bit Depth", "Channel Number" and "Sample Frequency" + # - usage example: + # expected_values="24 bits, Channel Number [2], 48.0 kHz" + # hdmirx_output_checker.sh verify_get_audio_info_output "${expected_values}" + EXIT=0 + candidate_sections=( "Audio Bits" "Audio Channel Info" "Audio Sample Freq" ) + IFS=',' read -r -a expected_values <<< "$2" + for index in "${!candidate_sections[@]}"; + do + echo "Checking the value '${expected_values[$index]}' should exist in ${candidate_sections[$index]} section ..." + if ! grep -qw "${expected_values[$index]}" "$1" ; then + echo " - FAIL" + EXIT=1 + else + echo " - PASS" + fi + done + exit $EXIT +} + +help_function() { + echo "This script is used to verify the output of hdmixrx_tool." 
+ echo + echo "Usage: hdmirx_output_checker.sh " + echo + echo "Log Path: A specific log path of stored output of hdmirx_tool" + echo + echo "Actions:" + echo " verify_check_cable_output" + echo " verify_check_video_locked_output" + echo " verify_get_video_info_output" + echo " verify_get_audio_info_output" +} + +main(){ + # $1 is the log file who records the output of hdmi_rx tool + case ${2} in + verify_check_cable_output) verify_check_cable_output "${1}" "${3}" ;; + verify_check_video_locked_output) verify_check_video_locked_output "${1}" "${3}" ;; + verify_get_video_info_output) verify_get_video_info_output "${1}" "${3}" ;; + verify_get_audio_info_output) verify_get_audio_info_output "${1}" "${3}" ;; + *) help_function; exit + esac +} + +main "$@" diff --git a/contrib/genio/bin/hdmirx_tool_runner.sh b/contrib/genio/bin/hdmirx_tool_runner.sh new file mode 100755 index 0000000000..b7b257c168 --- /dev/null +++ b/contrib/genio/bin/hdmirx_tool_runner.sh @@ -0,0 +1,124 @@ +#!/bin/bash + +# This script is used to interact with the mtk_hdmirx_tool. You can find the source +# code of mtk_hdmi_rx from the following link: +# - https://gitlab.com/mediatek/aiot/bsp/mtk-hdmirx-tool +# +# The following output is the first glance of hdmi-rx-tool +# User can choose any action to manipulate with it. 
+# +# root@mtk-genio:/home/ubuntu# genio-test-tool.hdmi-rx-tool +# hdmirx tool version: 1.0.0 +# hdmirx driver version: 1.0.0 +# +# 1) enable hdmi 2) disable hdmi +# 3) get device info 4) check cable +# 5) get video info 6) check video locked +# 7) get audio info 8) check audio locked +# a) start observing b) stop observing +# h) help q) quit +# + + +run_expect() { +# $1 is the timeout value, once it occurs, the process of expect will be closed automatically +# $2 is the available action options provided by hdmi-rx-tool + expect -c " + # Initialization + set timeout $1 + log_file $LOG_PATH + spawn genio-test-tool.hdmi-rx-tool + sleep 0.5 + send \"\r\" + expect getchar= + + # Send command with specific action + send \"$2\r\" + + # Block until timeout + expect pending + " +} + +enable_hdmi() { + # Timeout 1 second and perform number 1 action + run_expect 1 1 +} + +disable_hdmi() { + # Timeout 1 second and perform number 2 action + run_expect 1 2 +} + +get_device_info() { + # Timeout 1 second and perform number 3 action + run_expect 1 3 +} + +check_cable() { + # Timeout 1 second and perform number 4 action + run_expect 1 4 +} + +get_video_info() { + # Timeout 1 second and perform number 5 action + run_expect 1 5 +} + +check_video_locked() { + # Timeout 1 second and perform number 6 action + run_expect 1 6 +} + +get_audio_info() { + # Timeout 1 second and perform number 7 action + run_expect 1 7 +} + +check_audio_locked() { + # Timeout 1 second and perform number 8 action + run_expect 1 8 +} + +start_observing() { + # Timeout 15 seconds and perform number a action + # It will monitor the event while plugging/unplugging HDMI cable to HDMI RX port on DUT + run_expect 15 a +} + +help_function() { + echo "This script is used to interact with genio-test-tool.hdmi-rx-tool" + echo + echo "Usage: hdmirx_tool_runner.sh " + echo + echo "Log Path: A specific path for storing the output of hdmi-rx-tool" + echo + echo "Actions:" + echo " enable_hdmi" + echo " disable_hdmi" + echo " 
get_device_info" + echo " check_cable" + echo " get_video_info" + echo " check_video_locked" + echo " get_audio_info" + echo " check_audio_locked" + echo " start_observing" +} + +main(){ + LOG_PATH=${1} + case ${2} in + enable_hdmi) enable_hdmi ;; + disable_hdmi) disable_hdmi ;; + get_device_info) get_device_info ;; + check_cable) check_cable ;; + get_video_info) get_video_info ;; + check_video_locked) check_video_locked ;; + get_audio_info) get_audio_info ;; + check_audio_locked) check_audio_locked ;; + start_observing) start_observing ;; + *) help_function; exit + esac +} + +main "$@" diff --git a/contrib/genio/bin/linux_ccf.py b/contrib/genio/bin/linux_ccf.py new file mode 100755 index 0000000000..db2d6d08ab --- /dev/null +++ b/contrib/genio/bin/linux_ccf.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 + +import os +import argparse +import subprocess + +PLAINBOX_SESSION_SHARE = os.environ.get('PLAINBOX_SESSION_SHARE') +if not PLAINBOX_SESSION_SHARE: + print("no env var PLAINBOX_SESSION_SHARE") + exit(1) + +PLAINBOX_PROVIDER_DATA = os.environ.get('PLAINBOX_PROVIDER_DATA') +if not PLAINBOX_PROVIDER_DATA: + print("no env var PLAINBOX_PROVIDER_DATA") + exit(1) + + +def runcmd(command): + ret = subprocess.run( + command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + encoding="utf-8", + timeout=1 + ) + return ret + + +def test_linux_ccf(soc): + if soc == 'mt8365': + print('mt8365 is not supported') + exit(1) + + clk_summary_path = f"{PLAINBOX_SESSION_SHARE}/clk-summary.txt" + cat_ret = runcmd( + [f"cat /sys/kernel/debug/clk/clk_summary | tee {clk_summary_path}"]) + + if cat_ret.returncode: + print(f'Failed: unable to dump clk_summary data to {clk_summary_path}') + exit(1) + print('Dump /sys/kernel/debug/clk/clk_summary:') + print(cat_ret.stdout) + + if soc == 'mt8390': + verify_ret = runcmd([ + ( + f"verify-mt8188-ccf.sh" + f" -t {PLAINBOX_PROVIDER_DATA}/linux-ccf/mt8188-clk.h" + f" -s {clk_summary_path}" + ) + ]) + elif soc == 'mt8395' or soc == 
'mt8195': + verify_ret = runcmd([ + ( + f"verify-mt8195-ccf.sh" + f" -t {PLAINBOX_PROVIDER_DATA}/linux-ccf/mt8195-clk.h" + f" -s {clk_summary_path}" + ) + ]) + + if verify_ret.returncode: + print(f'Failed: {verify_ret.stdout}') + exit(1) + if verify_ret.stdout.split('\n')[0] \ + == '[-] Success, all clocks are mapped !': + print('Test Pass') + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + 'soc', + help='SoC type. e.g mt8395', + choices=['mt8395', 'mt8390'] + ) + args = parser.parse_args() + test_linux_ccf(args.soc) + + +if __name__ == '__main__': + main() diff --git a/contrib/genio/bin/pmic_regulator.py b/contrib/genio/bin/pmic_regulator.py new file mode 100755 index 0000000000..2419071d08 --- /dev/null +++ b/contrib/genio/bin/pmic_regulator.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python3 + +import os +import argparse +import sys + + +MAIN_REGULATORS = ( + 'vs1', 'vgpu11', 'vmodem', 'vpu', 'vcore', 'vs2', 'vpa', 'vproc2', + 'vproc1', 'vgpu11_sshub', 'vaud18', 'vsim1', 'vibr', 'vrf12', 'vusb', + 'vsram_proc2', 'vio18', 'vcamio', 'vcn18', 'vfe28', 'vcn13', + 'vcn33_1_bt', 'vcn33_1_wifi', 'vaux18', 'vsram_others', 'vefuse', 'vxo22', + 'vrfck', 'vbif28', 'vio28', 'vemc', 'vcn33_2_bt', 'vcn33_2_wifi', 'va12', + 'va09', 'vrf18', 'vsram_md', 'vufs', 'vm18', 'vbbck', 'vsram_proc1', + 'vsim2', 'vsram_others_sshub') +mt8365_MAIN_REGULATORS = ( + 'vproc', 'vcore', 'vmodem', 'vs1', 'vpa', 'vfe28', 'vxo22', 'vrf18', + 'vrf12', 'vefuse', 'vcn33-bt', 'vcn33-wifi', 'vcn28', 'vcn18', 'vcama', + 'vcamd', 'vcamio', 'vldo28', 'vsram-others', 'vsram-proc', 'vaux18', + 'vaud28', 'vio28', 'vio18', 'vdram', 'vmc', 'vmch', 'vemc', 'vsim1', + 'vsim2', 'vibr', 'vusb33') + + +def read_attr(attr): + path = os.path.join('/sys/class/regulator', attr) + if not os.path.exists(path): + return '' + with open(path) as f: + tmp = f.read().strip() + return tmp + + +def read_attr_name(attr): + tmp = read_attr(attr) + if not tmp: + return -1 + + return tmp + + +def 
read_name(reg): + return read_attr_name('regulator.%d/name' % reg) + + +def read_all_name(): + tmp = [] + i = 0 + while True: + t = read_name(i) + if t == -1: + break + + tmp.append(t) + i += 1 + + return set(tmp) + + +def test_regulator(soc): + missing_node = False + expect_set = mt8365_MAIN_REGULATORS if soc == 'mt8365' else MAIN_REGULATORS + current_set = read_all_name() + for node in expect_set: + print(f'Checking the \'{node}\' node exists in System...') + if node not in current_set: + missing_node = True + print( + f' - ERROR: expect the \'{node}\' node exist but it doesn\'t') + + if missing_node: + print('Test Fail') + sys.exit(1) + print('Test Pass') + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + 'soc', + help='SoC type. e.g mt8395', + choices=['mt8395', 'mt8390', 'mt8365'] + ) + args = parser.parse_args() + test_regulator(args.soc) + + +if __name__ == '__main__': + main() diff --git a/contrib/genio/bin/serialcheck.py b/contrib/genio/bin/serialcheck.py new file mode 100755 index 0000000000..2468fb8f3c --- /dev/null +++ b/contrib/genio/bin/serialcheck.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 + +import argparse +import subprocess +import os + + +def runcmd(command): + ret = subprocess.run( + command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + encoding="utf-8", + ) + return ret + + +def test_uart_by_serialcheck(soc): + base_path = os.environ.get('PLAINBOX_SESSION_SHARE', '/tmp') + file_path = f'{base_path}/binary' + runcmd([f'dd if=/dev/urandom of={file_path} count=1 bs=4096']) + + golden_msg = ( + 'cts: 0 dsr: 0 rng: 0 dcd: 0 rx: 12288' + ' tx: 12288 frame 0 ovr 0 par: 0 brk: 0 buf_ovrr: 0' + ) + print('Golden Sample:') + print(golden_msg) + + tty_node = 'ttyS1' if soc == 'mt8395' else 'ttyS2' + cmd = 'genio-test-tool.serialcheck -d /dev/{} -f {} -m d -l 3 -b {}' + + available_baudrate = [ + 3000000, 2000000, 921600, 576000, 460800, 230400, 115200, 57600, + 38400, 19200, 9600, 4800, 2400, 1200, 600, 
300, 110 + ] + + fail = 0 + for br in available_baudrate: + print('\n' + '*' * 80) + print(f'Testing baudrate: {br}\n') + ret = runcmd([cmd.format(tty_node, file_path, br)]) + print(ret.stdout) + if ret.returncode != 0 or ret.stdout.split('\n')[-2] != golden_msg: + fail = 1 + print('Fail: the output doesn\'t match the golden sample') + + raise SystemExit(fail) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + 'soc', + help='SoC type. e.g mt8395', + choices=['mt8395', 'mt8390', 'mt8365'] + ) + args = parser.parse_args() + test_uart_by_serialcheck(args.soc) + + +if __name__ == '__main__': + main() diff --git a/contrib/genio/bin/set_as_performance_mode.sh b/contrib/genio/bin/set_as_performance_mode.sh new file mode 100755 index 0000000000..a081d78961 --- /dev/null +++ b/contrib/genio/bin/set_as_performance_mode.sh @@ -0,0 +1,192 @@ +#!/bin/bash +set -e + +STORE_HOUSE=/var/tmp/performance-setting + +if [ -d "${PLAINBOX_SESSION_SHARE}" ]; then + STORE_HOUSE="${PLAINBOX_SESSION_SHARE}"/performance-setting +fi + +if ! [ -d "$STORE_HOUSE" ]; then + mkdir "$STORE_HOUSE" +fi + +MALI_SOC="13000000.mali" + +get_current_setting() { +# $1 is the specific device platform. e.g. 
G1200-evk + echo "===== Current Configuration =====" + if [ "${1}" == "G1200-evk" ]; then + for i in 0 4 + do + echo "- /sys/devices/system/cpu/cpufreq/policy$i/scaling_governor:" + cat /sys/devices/system/cpu/cpufreq/policy$i/scaling_governor + done + elif [ "${1}" == "G700" ]; then + for i in 0 6 + do + echo "- /sys/devices/system/cpu/cpufreq/policy$i/scaling_governor:" + cat /sys/devices/system/cpu/cpufreq/policy$i/scaling_governor + done + for i in {0..2} + do + # shellcheck disable=SC2027 + echo "- /sys/class/thermal/thermal_zone0/trip_point_"$i"_temp" + cat /sys/class/thermal/thermal_zone0/trip_point_"$i"_temp + done + elif [ "${1}" == "G350" ]; then + for i in {0..3} + do + echo "- /sys/devices/system/cpu/cpu$i/cpufreq/scaling_governor" + cat /sys/devices/system/cpu/cpu"$i"/cpufreq/scaling_governor + done + fi + echo "- /sys/devices/platform/soc/$MALI_SOC/devfreq/$MALI_SOC/governor:" + cat /sys/devices/platform/soc/"$MALI_SOC"/devfreq/"$MALI_SOC"/governor + echo "- /sys/class/thermal/thermal_zone0/mode:" + cat /sys/class/thermal/thermal_zone0/mode + echo +} + +store_setting() { +# $1 is the specific device platform. e.g. G1200-evk + echo "===== Store current config into ${STORE_HOUSE} directory =====" + if [ "${1}" == "G1200-evk" ]; then + for i in 0 4 + do + cat /sys/devices/system/cpu/cpufreq/policy$i/scaling_governor > "$STORE_HOUSE"/p"$i"_sg + done + elif [ "${1}" == "G700" ]; then + for i in 0 6 + do + cat /sys/devices/system/cpu/cpufreq/policy$i/scaling_governor > "$STORE_HOUSE"/p"$i"_sg + done + for i in {0..2} + do + cat /sys/class/thermal/thermal_zone0/trip_point_"$i"_temp > "$STORE_HOUSE"/"$i"_temp + done + elif [ "${1}" == "G350" ]; then + cat /sys/devices/system/cpu/cpufreq/policy0/scaling_governor > "$STORE_HOUSE"/p0_sg + fi + cat /sys/devices/platform/soc/"$MALI_SOC"/devfreq/"$MALI_SOC"/governor > "$STORE_HOUSE"/mali_g + echo "Store Done" + echo +} + +set_to_performance_mode() { +# $1 is the specific device platform. e.g. 
g1200-evk + echo "===== Set to performance mode =====" + if [ "${1}" == "G1200-evk" ]; then + for i in 0 4 + do + echo "performance" > /sys/devices/system/cpu/cpufreq/policy$i/scaling_governor + done + # disable cpuidle + toggle_cpuidle_state_disable 2 7 set_1 + echo disabled > /sys/class/thermal/thermal_zone0/mode + elif [ "${1}" == "G700" ]; then + for i in 0 6 + do + echo "performance" > /sys/devices/system/cpu/cpufreq/policy$i/scaling_governor + done + # disable cpuidle + toggle_cpuidle_state_disable 2 7 set_1 + for i in {0..2} + do + echo "115000" > /sys/class/thermal/thermal_zone0/trip_point_"$i"_temp + done + elif [ "${1}" == "G350" ]; then + echo "performance" > /sys/devices/system/cpu/cpufreq/policy0/scaling_governor + echo disabled > /sys/class/thermal/thermal_zone0/mode + fi + echo "performance" > /sys/devices/platform/soc/"$MALI_SOC"/devfreq/"$MALI_SOC"/governor + echo "Setting Done" + echo +} + +back_to_original_mode_from_performance() { +# $1 is the specific device platform. e.g. 
g1200-evk
+    echo "===== Set back to original mode ====="
+    if [ "${1}" == "G1200-evk" ]; then
+        for i in 0 4
+        do
+            cat "$STORE_HOUSE"/p"$i"_sg > /sys/devices/system/cpu/cpufreq/policy$i/scaling_governor
+        done
+        # enable cpuidle
+        toggle_cpuidle_state_disable 2 7 set_0
+        echo enabled > /sys/class/thermal/thermal_zone0/mode
+    elif [ "${1}" == "G700" ]; then
+        for i in 0 6
+        do
+            cat "$STORE_HOUSE"/p"$i"_sg > /sys/devices/system/cpu/cpufreq/policy$i/scaling_governor
+        done
+        # enable cpuidle
+        toggle_cpuidle_state_disable 2 7 set_0
+        for i in {0..2}
+        do
+            cat "$STORE_HOUSE"/"$i"_temp > /sys/class/thermal/thermal_zone0/trip_point_"$i"_temp
+        done
+    elif [ "${1}" == "G350" ]; then
+        cat "$STORE_HOUSE"/p0_sg > /sys/devices/system/cpu/cpufreq/policy0/scaling_governor
+        echo enabled > /sys/class/thermal/thermal_zone0/mode
+    fi
+    cat "$STORE_HOUSE"/mali_g > /sys/devices/platform/soc/"$MALI_SOC"/devfreq/"$MALI_SOC"/governor
+    echo "Setting Done"
+    echo
+}
+
+toggle_cpuidle_state_disable() {
+# $1 is the count of /sys/devices/system/cpu/cpuX/cpuidle/state"$1"
+# $2 is the count of /sys/devices/system/cpu/cpu"$2"
+# $3 is the action to enable, disable the value of disable attribute for each cpu cpuidle state. {set_1 | set_0}, default: set_0
+for (( j=0;j<=${1};j++ ))
+do
+    for (( i=0;i<=${2};i++ ))
+    do
+        value_to_be_set=0
+        if [ "${3}" == "set_1" ]; then
+            value_to_be_set=1
+        fi
+        echo $value_to_be_set > /sys/devices/system/cpu/cpu"$i"/cpuidle/state"$j"/disable
+    done
+done
+}
+
+
+main() {
+# $1 is the specific device platform. e.g. G1200-evk
+# $2 is the action. {set-to-performance | reset}
+    SUPPORTED_DEVICES=("G1200-evk" "G700" "G350")
+    FIND_DEIVCE=0
+    for device in "${SUPPORTED_DEVICES[@]}"; do
+        if [ "${1}" == "$device" ]; then
+            export FIND_DEIVCE=1
+        fi
+    done
+
+    if [ $FIND_DEIVCE -eq 0 ]; then
+        echo "Device: ${1} is not supported." 
+ exit 1 + fi + + if [ "${1}" == "G350" ]; then + MALI_SOC="13040000.mali" + fi + + case ${2} in + set-to-performance) + get_current_setting "${1}" + store_setting "${1}" + set_to_performance_mode "${1}" + get_current_setting "${1}" + ;; + reset) + get_current_setting "${1}" + back_to_original_mode_from_performance "${1}" + get_current_setting "${1}" + ;; + *) echo "Action is not supported. Available options: { set-to-performance | reset | show }" + esac +} + +main "$@" diff --git a/contrib/genio/bin/spidev_test.py b/contrib/genio/bin/spidev_test.py new file mode 100755 index 0000000000..d4816cba52 --- /dev/null +++ b/contrib/genio/bin/spidev_test.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 + +import os +import argparse +import subprocess + +PLAINBOX_PROVIDER_DATA = os.environ.get('PLAINBOX_PROVIDER_DATA') + + +def runcmd(command): + ret = subprocess.run( + command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + encoding="utf-8", + timeout=1 + ) + return ret + + +def check_spi_node(path): + print("Checking whether SPI node {} exists".format(path)) + if os.path.exists(path): + print("PASS: SPI node {} exist!\n".format(path)) + else: + raise SystemExit("ERROR: SPI node {} does NOT exist!".format(path)) + + +def test_spi_content_consistency(platform): + spi_path = '/dev/spidev0.0' + if platform == 'G1200-evk': + spi_path = '/dev/spidev1.0' + + check_spi_node(spi_path) + + test_bin_path = f'{PLAINBOX_PROVIDER_DATA}/spi/test.bin' + cmd = ( + f'genio-test-tool.spidev-test -D' + f' {spi_path} -s 400000 -i {test_bin_path} -v' + ) + print(f'Run command: {cmd}\n') + spi_ret = runcmd([cmd]) + print(spi_ret.stdout) + + packets = spi_ret.stdout.split('\n') + if not len(packets): + raise SystemExit( + 'ERROR: no any output be reported') + for rx, tx in zip(packets[-2:-1], packets[-3:-2]): + tx_content = tx.split('|')[2] + rx_content = rx.split('|')[2] + if tx_content != rx_content: + raise SystemExit( + 'ERROR: the content is not consistent between TX and 
RX') + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + 'platform', + help='Device platform. e.g G1200-evk', + choices=['G1200-evk', 'G700', 'G350'] + ) + args = parser.parse_args() + test_spi_content_consistency(args.platform) + + +if __name__ == "__main__": + main() diff --git a/contrib/genio/bin/verify-mt8188-ccf.sh b/contrib/genio/bin/verify-mt8188-ccf.sh new file mode 100755 index 0000000000..7518660f62 --- /dev/null +++ b/contrib/genio/bin/verify-mt8188-ccf.sh @@ -0,0 +1,106 @@ +#!/bin/bash + +# Author: Amjad Ouled-Ameur + +CLK_TABLE= +CLK_SUMMARY= +NR_MISSING_CLKS= + +if [ "$1" = "-h" ] || [ "$1" = "--help" ]; then + printf "verify-ccf -t clock-table.h -s clk_summary\n\n" + printf "options:\n" + printf "" + printf " -t, --table Clock table from the source code\n" + printf " -s, --summary Output of \"/sys/kernel/debug/clk/clk_summary\"\n" + exit +fi + +while [[ $# -gt 0 ]]; do + key="$1" + + case $key in + -t|--table) + CLK_TABLE="$2" + shift # past argument + shift # past value + ;; + -s|--summary) + CLK_SUMMARY="$2" + shift + shift + ;; + *) # unknown option + shift # past argument + ;; + esac +done + +# Extract clocks names from clock table header +grep -oP '(?<=#define CLK_).*' "${CLK_TABLE}" | awk '{print tolower($1)}' > clk-table-parsed.txt + +# Remove some prefixes and suffixes to match clock summary +sed -i '/_nr_clk/d' clk-table-parsed.txt # number of clocks +sed -i 's/_self$/sf/g' clk-table-parsed.txt +sed -i 's/^apmixed_//g' clk-table-parsed.txt +sed -i 's/^top_//g' clk-table-parsed.txt +sed -i 's/_top_/_/g' clk-table-parsed.txt +sed -i 's/_top$//g' clk-table-parsed.txt +sed -i 's/_ck_/_/g' clk-table-parsed.txt +sed -i 's/_ck$//g' clk-table-parsed.txt +sed -i 's/_clk$//g' clk-table-parsed.txt +sed -i 's/en_v/v/g' clk-table-parsed.txt +sed -i 's/_bclk$/b/g' clk-table-parsed.txt +sed -i 's/bclk$/b/g' clk-table-parsed.txt +sed -i 's/_hclk$/h/g' clk-table-parsed.txt +sed -i 's/_h$/h/g' clk-table-parsed.txt +sed -i 
's/pclk/p/g' clk-table-parsed.txt +sed -i 's/_src$//g' clk-table-parsed.txt +sed -i 's/device_/d/g' clk-table-parsed.txt +sed -i 's/rg_//g' clk-table-parsed.txt + + + + +# Sort clock table alphabetically +sort -o clk-table-parsed.txt clk-table-parsed.txt + +# Extract clock names from clk_summary +awk '{print $1}' "${CLK_SUMMARY}" > clk-summary-parsed.txt + +# Remove some prefixes to match clock table +sed -i 's/^top_//g' clk-summary-parsed.txt +sed -i 's/_top_/_/g' clk-summary-parsed.txt +sed -i 's/_top$//g' clk-summary-parsed.txt +sed -i 's/^hd_//g' clk-summary-parsed.txt +sed -i 's/_set$//g' clk-summary-parsed.txt +sed -i 's/_ck_/_/g' clk-summary-parsed.txt +sed -i 's/_ck$//g' clk-summary-parsed.txt +sed -i 's/_clk$//g' clk-summary-parsed.txt +sed -i 's/_bclk$/b/g' clk-summary-parsed.txt +sed -i 's/bclk$/b/g' clk-summary-parsed.txt +sed -i 's/_hclk$/h/g' clk-summary-parsed.txt +sed -i 's/_h$/h/g' clk-summary-parsed.txt +# Sort clock names of clk_summary +sort -o clk-summary-parsed.txt clk-summary-parsed.txt + +# Diff +diff clk-table-parsed.txt clk-summary-parsed.txt | grep -v "^[0-9c0-9]" > missing-clocks-raw.txt + +# Get only missing clocks in clk_summary +grep -oP '(?<=\< ).*' missing-clocks-raw.txt > missing-clocks-lcase.txt + +# Transform to uppercase for convenience +dd if=missing-clocks-lcase.txt of=missing-clocks.txt conv=ucase 1>/dev/null 2>&1 + +NR_MISSING_CLKS=$(wc -l < missing-clocks.txt) + +if [ "${NR_MISSING_CLKS}" -ne "0" ]; then + printf "[-] Missing clocks: \n" + cat missing-clocks.txt + + printf "\n[-] Count missing clocks: "${NR_MISSING_CLKS}"\n" +else + printf "[-] Success, all clocks are mapped !\n" +fi + +rm missing-clocks-raw.txt missing-clocks-lcase.txt clk-summary-parsed.txt clk-table-parsed.txt diff --git a/contrib/genio/bin/verify-mt8195-ccf.sh b/contrib/genio/bin/verify-mt8195-ccf.sh new file mode 100755 index 0000000000..40ef510b76 --- /dev/null +++ b/contrib/genio/bin/verify-mt8195-ccf.sh @@ -0,0 +1,79 @@ +#!/bin/bash + +# Author: 
Amjad Ouled-Ameur + +CLK_TABLE= +CLK_SUMMARY= +NR_MISSING_CLKS= + +if [ "$1" = "-h" ] || [ "$1" = "--help" ]; then + printf "verify-ccf -t clock-table.h -s clk_summary\n\n" + printf "options:\n" + printf "" + printf " -t, --table Clock table from the source code\n" + printf " -s, --summary Output of \"/sys/kernel/debug/clk/clk_summary\"\n" + exit +fi + +while [[ $# -gt 0 ]]; do + key="$1" + + case $key in + -t|--table) + CLK_TABLE="$2" + shift # past argument + shift # past value + ;; + -s|--summary) + CLK_SUMMARY="$2" + shift + shift + ;; + *) # unknown option + shift # past argument + ;; + esac +done + +# Extract clocks names from clock table header +grep -oP '(?<=#define CLK_).*' "${CLK_TABLE}" | awk '{print tolower($1)}' > clk-table-parsed.txt + +# Remove some prefixes and suffixes to match clock summary +sed -i '/_nr_clk/d' clk-table-parsed.txt # number of clocks +sed -i 's/_self$/f/g' clk-table-parsed.txt +sed -i 's/^apmixed_//g' clk-table-parsed.txt +sed -i 's/^top_//g' clk-table-parsed.txt + +# Sort clock table alphabetically +sort -o clk-table-parsed.txt clk-table-parsed.txt + +# Extract clock names from clk_summary +awk '{print $1}' "${CLK_SUMMARY}" > clk-summary-parsed.txt + +# Remove some prefixes to match clock table +sed -i 's/^top_//g' clk-summary-parsed.txt + +# Sort clock names of clk_summary +sort -o clk-summary-parsed.txt clk-summary-parsed.txt + +# Diff +diff clk-table-parsed.txt clk-summary-parsed.txt | grep -v "^[0-9c0-9]" > missing-clocks-raw.txt + +# Get only missing clocks in clk_summary +grep -oP '(?<=\< ).*' missing-clocks-raw.txt > missing-clocks-lcase.txt + +# Transform to uppercase for convenience +dd if=missing-clocks-lcase.txt of=missing-clocks.txt conv=ucase 1>/dev/null 2>&1 + +NR_MISSING_CLKS=$(wc -l < missing-clocks.txt) + +if [ "${NR_MISSING_CLKS}" -ne "0" ]; then + printf "[-] Missing clocks: \n" + cat missing-clocks.txt + + printf "\n[-] Count missing clocks: "${NR_MISSING_CLKS}"\n" +else + printf "[-] Success, all clocks are 
mapped !\n" +fi + +rm missing-clocks-raw.txt missing-clocks-lcase.txt clk-summary-parsed.txt clk-table-parsed.txt diff --git a/contrib/genio/data/linux-ccf/mt8188-clk.h b/contrib/genio/data/linux-ccf/mt8188-clk.h new file mode 100755 index 0000000000..e67f637eb6 --- /dev/null +++ b/contrib/genio/data/linux-ccf/mt8188-clk.h @@ -0,0 +1,734 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ +/* + * Copyright (c) 2022 MediaTek Inc. + * Author: Garmin Chang + */ + +#ifndef _DT_BINDINGS_CLK_MT8188_H +#define _DT_BINDINGS_CLK_MT8188_H + +/* TOPCKGEN */ +#define CLK_TOP_AXI 0 +#define CLK_TOP_SPM 1 +#define CLK_TOP_SCP 2 +#define CLK_TOP_BUS_AXIMEM 3 +#define CLK_TOP_VPP 4 +#define CLK_TOP_ETHDR 5 +#define CLK_TOP_IPE 6 +#define CLK_TOP_CAM 7 +#define CLK_TOP_CCU 8 +#define CLK_TOP_CCU_AHB 9 +#define CLK_TOP_IMG 10 +#define CLK_TOP_CAMTM 11 +#define CLK_TOP_DSP 12 +#define CLK_TOP_DSP1 13 +#define CLK_TOP_DSP2 14 +#define CLK_TOP_DSP3 15 +#define CLK_TOP_DSP4 16 +#define CLK_TOP_DSP5 17 +#define CLK_TOP_DSP6 18 +#define CLK_TOP_DSP7 19 +#define CLK_TOP_MFG_CORE_TMP 20 +#define CLK_TOP_CAMTG 21 +#define CLK_TOP_CAMTG2 22 +#define CLK_TOP_CAMTG3 23 +#define CLK_TOP_UART 24 +#define CLK_TOP_SPI 25 +#define CLK_TOP_MSDC5HCLK 26 +#define CLK_TOP_MSDC50_0 27 +#define CLK_TOP_MSDC30_1 28 +#define CLK_TOP_MSDC30_2 29 +#define CLK_TOP_INTDIR 30 +#define CLK_TOP_AUD_INTBUS 31 +#define CLK_TOP_AUDIO_H 32 +#define CLK_TOP_PWRAP_ULPOSC 33 +#define CLK_TOP_ATB 34 +#define CLK_TOP_SSPM 35 +#define CLK_TOP_DP 36 +#define CLK_TOP_EDP 37 +#define CLK_TOP_DPI 38 +#define CLK_TOP_DISP_PWM0 39 +#define CLK_TOP_DISP_PWM1 40 +#define CLK_TOP_USB_TOP 41 +#define CLK_TOP_SSUSB_XHCI 42 +#define CLK_TOP_USB_TOP_2P 43 +#define CLK_TOP_SSUSB_XHCI_2P 44 +#define CLK_TOP_USB_TOP_3P 45 +#define CLK_TOP_SSUSB_XHCI_3P 46 +#define CLK_TOP_I2C 47 +#define CLK_TOP_SENINF 48 +#define CLK_TOP_SENINF1 49 +#define CLK_TOP_GCPU 50 +#define CLK_TOP_VENC 51 +#define CLK_TOP_VDEC 52 +#define CLK_TOP_PWM 53 
+#define CLK_TOP_MCUPM 54 +#define CLK_TOP_SPMI_P_MST 55 +#define CLK_TOP_SPMI_M_MST 56 +#define CLK_TOP_DVFSRC 57 +#define CLK_TOP_TL 58 +#define CLK_TOP_AES_MSDCFDE 59 +#define CLK_TOP_DSI_OCC 60 +#define CLK_TOP_WPE_VPP 61 +#define CLK_TOP_HDCP 62 +#define CLK_TOP_HDCP_24M 63 +#define CLK_TOP_HDMI_APB 64 +#define CLK_TOP_SNPS_ETH_250M 65 +#define CLK_TOP_SNPS_ETH_62P4M_PTP 66 +#define CLK_TOP_SNPS_ETH_50M_RMII 67 +#define CLK_TOP_ADSP 68 +#define CLK_TOP_AUDIO_LOCAL_BUS 69 +#define CLK_TOP_ASM_H 70 +#define CLK_TOP_ASM_L 71 +#define CLK_TOP_APLL1 72 +#define CLK_TOP_APLL2 73 +#define CLK_TOP_APLL3 74 +#define CLK_TOP_APLL4 75 +#define CLK_TOP_APLL5 76 +#define CLK_TOP_I2SO1 77 +#define CLK_TOP_I2SO2 78 +#define CLK_TOP_I2SI1 79 +#define CLK_TOP_I2SI2 80 +#define CLK_TOP_DPTX 81 +#define CLK_TOP_AUD_IEC 82 +#define CLK_TOP_A1SYS_HP 83 +#define CLK_TOP_A2SYS 84 +#define CLK_TOP_A3SYS 85 +#define CLK_TOP_A4SYS 86 +#define CLK_TOP_ECC 87 +#define CLK_TOP_SPINOR 88 +#define CLK_TOP_ULPOSC 89 +#define CLK_TOP_SRCK 90 +#define CLK_TOP_MFG_CK_FAST_REF 91 +#define CLK_TOP_MFGPLL 92 +#define CLK_TOP_MAINPLL 93 +#define CLK_TOP_MAINPLL_D3 94 +#define CLK_TOP_MAINPLL_D4 95 +#define CLK_TOP_MAINPLL_D4_D2 96 +#define CLK_TOP_MAINPLL_D4_D4 97 +#define CLK_TOP_MAINPLL_D4_D8 98 +#define CLK_TOP_MAINPLL_D5 99 +#define CLK_TOP_MAINPLL_D5_D2 100 +#define CLK_TOP_MAINPLL_D5_D4 101 +#define CLK_TOP_MAINPLL_D5_D8 102 +#define CLK_TOP_MAINPLL_D6 103 +#define CLK_TOP_MAINPLL_D6_D2 104 +#define CLK_TOP_MAINPLL_D6_D4 105 +#define CLK_TOP_MAINPLL_D6_D8 106 +#define CLK_TOP_MAINPLL_D7 107 +#define CLK_TOP_MAINPLL_D7_D2 108 +#define CLK_TOP_MAINPLL_D7_D4 109 +#define CLK_TOP_MAINPLL_D7_D8 110 +#define CLK_TOP_MAINPLL_D9 111 +#define CLK_TOP_UNIVPLL 112 +#define CLK_TOP_UNIVPLL_D2 113 +#define CLK_TOP_UNIVPLL_D3 114 +#define CLK_TOP_UNIVPLL_D4 115 +#define CLK_TOP_UNIVPLL_D4_D2 116 +#define CLK_TOP_UNIVPLL_D4_D4 117 +#define CLK_TOP_UNIVPLL_D4_D8 118 +#define CLK_TOP_UNIVPLL_D5 119 +#define 
CLK_TOP_UNIVPLL_D5_D2 120 +#define CLK_TOP_UNIVPLL_D5_D4 121 +#define CLK_TOP_UNIVPLL_D5_D8 122 +#define CLK_TOP_UNIVPLL_D6 123 +#define CLK_TOP_UNIVPLL_D6_D2 124 +#define CLK_TOP_UNIVPLL_D6_D4 125 +#define CLK_TOP_UNIVPLL_D6_D8 126 +#define CLK_TOP_UNIVPLL_D7 127 +#define CLK_TOP_UNIVPLL_192M 128 +#define CLK_TOP_UNIVPLL_192M_D4 129 +#define CLK_TOP_UNIVPLL_192M_D8 130 +#define CLK_TOP_UNIVPLL_192M_D10 131 +#define CLK_TOP_UNIVPLL_192M_D16 132 +#define CLK_TOP_UNIVPLL_192M_D32 133 +#define CLK_TOP_IMGPLL 134 +#define CLK_TOP_APLL1_D3 135 +#define CLK_TOP_APLL1_D4 136 +#define CLK_TOP_APLL2_D3 137 +#define CLK_TOP_APLL2_D4 138 +#define CLK_TOP_APLL3_D4 139 +#define CLK_TOP_APLL4_D4 140 +#define CLK_TOP_APLL5_D4 141 +#define CLK_TOP_MMPLL 142 +#define CLK_TOP_MMPLL_D4 143 +#define CLK_TOP_MMPLL_D4_D2 144 +#define CLK_TOP_MMPLL_D5 145 +#define CLK_TOP_MMPLL_D5_D2 146 +#define CLK_TOP_MMPLL_D5_D4 147 +#define CLK_TOP_MMPLL_D6 148 +#define CLK_TOP_MMPLL_D6_D2 149 +#define CLK_TOP_MMPLL_D7 150 +#define CLK_TOP_MMPLL_D9 151 +#define CLK_TOP_TVDPLL1 152 +#define CLK_TOP_TVDPLL1_D2 153 +#define CLK_TOP_TVDPLL1_D4 154 +#define CLK_TOP_TVDPLL1_D8 155 +#define CLK_TOP_TVDPLL1_D16 156 +#define CLK_TOP_TVDPLL2 157 +#define CLK_TOP_TVDPLL2_D2 158 +#define CLK_TOP_TVDPLL2_D4 159 +#define CLK_TOP_TVDPLL2_D8 160 +#define CLK_TOP_TVDPLL2_D16 161 +#define CLK_TOP_MSDCPLL 162 +#define CLK_TOP_MSDCPLL_D2 163 +#define CLK_TOP_MSDCPLL_D16 164 +#define CLK_TOP_ETHPLL 165 +#define CLK_TOP_ETHPLL_D2 166 +#define CLK_TOP_ETHPLL_D4 167 +#define CLK_TOP_ETHPLL_D8 168 +#define CLK_TOP_ETHPLL_D10 169 +#define CLK_TOP_ADSPPLL 170 +#define CLK_TOP_ADSPPLL_D2 171 +#define CLK_TOP_ADSPPLL_D4 172 +#define CLK_TOP_ADSPPLL_D8 173 +#define CLK_TOP_ULPOSC_CK1 174 +#define CLK_TOP_ULPOSC1_D2 175 +#define CLK_TOP_ULPOSC1_D4 176 +#define CLK_TOP_ULPOSC1_D8 177 +#define CLK_TOP_ULPOSC1_D7 178 +#define CLK_TOP_ULPOSC1_D10 179 +#define CLK_TOP_ULPOSC1_D16 180 +#define CLK_TOP_MPHONE_SLAVE_BCK 181 +#define 
CLK_TOP_PAD_FPC 182 +#define CLK_TOP_466M_FMEM 183 +#define CLK_TOP_PEXTP_PIPE 184 +#define CLK_TOP_DSI_PHY 185 +#define CLK_TOP_APLL12_CK_DIV0 186 +#define CLK_TOP_APLL12_CK_DIV1 187 +#define CLK_TOP_APLL12_CK_DIV2 188 +#define CLK_TOP_APLL12_CK_DIV3 189 +#define CLK_TOP_APLL12_CK_DIV4 190 +#define CLK_TOP_APLL12_CK_DIV9 191 +#define CLK_TOP_CFGREG_CLOCK_EN_VPP0 192 +#define CLK_TOP_CFGREG_CLOCK_EN_VPP1 193 +#define CLK_TOP_CFGREG_CLOCK_EN_VDO0 194 +#define CLK_TOP_CFGREG_CLOCK_EN_VDO1 195 +#define CLK_TOP_CFGREG_CLOCK_ISP_AXI_GALS 196 +#define CLK_TOP_CFGREG_F26M_VPP0 197 +#define CLK_TOP_CFGREG_F26M_VPP1 198 +#define CLK_TOP_CFGREG_F26M_VDO0 199 +#define CLK_TOP_CFGREG_F26M_VDO1 200 +#define CLK_TOP_CFGREG_AUD_F26M_AUD 201 +#define CLK_TOP_CFGREG_UNIPLL_SES 202 +#define CLK_TOP_CFGREG_F_PCIE_PHY_REF 203 +#define CLK_TOP_SSUSB_TOP_REF 204 +#define CLK_TOP_SSUSB_PHY_REF 205 +#define CLK_TOP_SSUSB_TOP_P1_REF 206 +#define CLK_TOP_SSUSB_PHY_P1_REF 207 +#define CLK_TOP_SSUSB_TOP_P2_REF 208 +#define CLK_TOP_SSUSB_PHY_P2_REF 209 +#define CLK_TOP_SSUSB_TOP_P3_REF 210 +#define CLK_TOP_SSUSB_PHY_P3_REF 211 +#define CLK_TOP_NR_CLK 212 + +/* INFRACFG_AO */ +#define CLK_INFRA_AO_PMIC_TMR 0 +#define CLK_INFRA_AO_PMIC_AP 1 +#define CLK_INFRA_AO_PMIC_MD 2 +#define CLK_INFRA_AO_PMIC_CONN 3 +#define CLK_INFRA_AO_SEJ 4 +#define CLK_INFRA_AO_APXGPT 5 +#define CLK_INFRA_AO_GCE 6 +#define CLK_INFRA_AO_GCE2 7 +#define CLK_INFRA_AO_THERM 8 +#define CLK_INFRA_AO_PWM_HCLK 9 +#define CLK_INFRA_AO_PWM1 10 +#define CLK_INFRA_AO_PWM2 11 +#define CLK_INFRA_AO_PWM3 12 +#define CLK_INFRA_AO_PWM4 13 +#define CLK_INFRA_AO_PWM 14 +#define CLK_INFRA_AO_UART0 15 +#define CLK_INFRA_AO_UART1 16 +#define CLK_INFRA_AO_UART2 17 +#define CLK_INFRA_AO_UART3 18 +#define CLK_INFRA_AO_UART4 19 +#define CLK_INFRA_AO_GCE_26M 20 +#define CLK_INFRA_AO_DMA 21 +#define CLK_INFRA_AO_UART5 22 +#define CLK_INFRA_AO_HDMI_26M 23 +#define CLK_INFRA_AO_SPI0 24 +#define CLK_INFRA_AO_MSDC0 25 +#define CLK_INFRA_AO_MSDC1 26 
+#define CLK_INFRA_AO_MSDC2 27 +#define CLK_INFRA_AO_MSDC0_SRC 28 +#define CLK_INFRA_AO_DVFSRC 29 +#define CLK_INFRA_AO_TRNG 30 +#define CLK_INFRA_AO_AUXADC 31 +#define CLK_INFRA_AO_CPUM 32 +#define CLK_INFRA_AO_HDMI_32K 33 +#define CLK_INFRA_AO_CEC_66M_HCLK 34 +#define CLK_INFRA_AO_PCIE_TL_26M 35 +#define CLK_INFRA_AO_MSDC1_SRC 36 +#define CLK_INFRA_AO_CEC_66M_BCLK 37 +#define CLK_INFRA_AO_PCIE_TL_96M 38 +#define CLK_INFRA_AO_DEVICE_APC 39 +#define CLK_INFRA_AO_ECC_66M_HCLK 40 +#define CLK_INFRA_AO_DEBUGSYS 41 +#define CLK_INFRA_AO_AUDIO 42 +#define CLK_INFRA_AO_PCIE_TL_32K 43 +#define CLK_INFRA_AO_DBG_TRACE 44 +#define CLK_INFRA_AO_DRAMC26 45 +#define CLK_INFRA_AO_IRTX 46 +#define CLK_INFRA_AO_DISP_PWM 47 +#define CLK_INFRA_AO_CLDMA_BCLK 48 +#define CLK_INFRA_AO_AUDIO26M 49 +#define CLK_INFRA_AO_SPI1 50 +#define CLK_INFRA_AO_SPI2 51 +#define CLK_INFRA_AO_SPI3 52 +#define CLK_INFRA_AO_FSSPM 53 +#define CLK_INFRA_AO_SSPM_HCLK 54 +#define CLK_INFRA_AO_APDMA_BCLK 55 +#define CLK_INFRA_AO_SPI4 56 +#define CLK_INFRA_AO_SPI5 57 +#define CLK_INFRA_AO_CQ_DMA 58 +#define CLK_INFRA_AO_MSDC0_SELF 59 +#define CLK_INFRA_AO_MSDC1_SELF 60 +#define CLK_INFRA_AO_MSDC2_SELF 61 +#define CLK_INFRA_AO_I2S_DMA 62 +#define CLK_INFRA_AO_AP_MSDC0 63 +#define CLK_INFRA_AO_MD_MSDC0 64 +#define CLK_INFRA_AO_MSDC30_2 65 +#define CLK_INFRA_AO_GCPU 66 +#define CLK_INFRA_AO_PCIE_PERI_26M 67 +#define CLK_INFRA_AO_GCPU_66M_BCLK 68 +#define CLK_INFRA_AO_GCPU_133M_BCLK 69 +#define CLK_INFRA_AO_DISP_PWM1 70 +#define CLK_INFRA_AO_FBIST2FPC 71 +#define CLK_INFRA_AO_DEVICE_APC_SYNC 72 +#define CLK_INFRA_AO_PCIE_P1_PERI_26M 73 +#define CLK_INFRA_AO_133M_MCLK_CK 74 +#define CLK_INFRA_AO_66M_MCLK_CK 75 +#define CLK_INFRA_AO_PCIE_PL_P_250M_P0 76 +#define CLK_INFRA_AO_RG_AES_MSDCFDE_CK_0P 77 +#define CLK_INFRA_AO_NR_CLK 78 + +/* APMIXEDSYS */ +#define CLK_APMIXED_ETHPLL 0 +#define CLK_APMIXED_MSDCPLL 1 +#define CLK_APMIXED_TVDPLL1 2 +#define CLK_APMIXED_TVDPLL2 3 +#define CLK_APMIXED_MMPLL 4 +#define 
CLK_APMIXED_MAINPLL 5 +#define CLK_APMIXED_IMGPLL 6 +#define CLK_APMIXED_UNIVPLL 7 +#define CLK_APMIXED_ADSPPLL 8 +#define CLK_APMIXED_APLL1 9 +#define CLK_APMIXED_APLL2 10 +#define CLK_APMIXED_APLL3 11 +#define CLK_APMIXED_APLL4 12 +#define CLK_APMIXED_APLL5 13 +#define CLK_APMIXED_MFGPLL 14 +#define CLK_APMIXED_PLL_SSUSB26M_EN 15 +#define CLK_APMIXED_NR_CLK 16 + +/* AUDIODSP */ +#define CLK_AUDIODSP_AUDIO26M 0 +#define CLK_AUDIODSP_NR_CLK 1 + +/* PERICFG_AO */ +#define CLK_PERI_AO_ETHERNET 0 +#define CLK_PERI_AO_ETHERNET_BUS 1 +#define CLK_PERI_AO_FLASHIF_BUS 2 +#define CLK_PERI_AO_FLASHIF_26M 3 +#define CLK_PERI_AO_FLASHIFLASHCK 4 +#define CLK_PERI_AO_SSUSB_2P_BUS 5 +#define CLK_PERI_AO_SSUSB_2P_XHCI 6 +#define CLK_PERI_AO_SSUSB_3P_BUS 7 +#define CLK_PERI_AO_SSUSB_3P_XHCI 8 +#define CLK_PERI_AO_SSUSB_BUS 9 +#define CLK_PERI_AO_SSUSB_XHCI 10 +#define CLK_PERI_AO_ETHERNET_MAC 11 +#define CLK_PERI_AO_PCIE_P0_FMEM 12 +#define CLK_PERI_AO_NR_CLK 13 + +/* IMP_IIC_WRAP_C */ +#define CLK_IMP_IIC_WRAP_C_AP_CLOCK_I2C0 0 +#define CLK_IMP_IIC_WRAP_C_AP_CLOCK_I2C2 1 +#define CLK_IMP_IIC_WRAP_C_AP_CLOCK_I2C3 2 +#define CLK_IMP_IIC_WRAP_C_NR_CLK 3 + +/* IMP_IIC_WRAP_W */ +#define CLK_IMP_IIC_WRAP_W_AP_CLOCK_I2C1 0 +#define CLK_IMP_IIC_WRAP_W_AP_CLOCK_I2C4 1 +#define CLK_IMP_IIC_WRAP_W_NR_CLK 2 + +/* IMP_IIC_WRAP_EN */ +#define CLK_IMP_IIC_WRAP_EN_AP_CLOCK_I2C5 0 +#define CLK_IMP_IIC_WRAP_EN_AP_CLOCK_I2C6 1 +#define CLK_IMP_IIC_WRAP_EN_NR_CLK 2 + +/* MFGCFG */ +#define CLK_MFGCFG_BG3D 0 +#define CLK_MFGCFG_NR_CLK 1 + +/* VPPSYS0 */ +#define CLK_VPP0_MDP_FG 0 +#define CLK_VPP0_STITCH 1 +#define CLK_VPP0_PADDING 2 +#define CLK_VPP0_MDP_TCC 3 +#define CLK_VPP0_WARP0_ASYNC_TX 4 +#define CLK_VPP0_WARP1_ASYNC_TX 5 +#define CLK_VPP0_MUTEX 6 +#define CLK_VPP02VPP1_RELAY 7 +#define CLK_VPP0_VPP12VPP0_ASYNC 8 +#define CLK_VPP0_MMSYSRAM_TOP 9 +#define CLK_VPP0_MDP_AAL 10 +#define CLK_VPP0_MDP_RSZ 11 +#define CLK_VPP0_SMI_COMMON_MMSRAM 12 +#define CLK_VPP0_GALS_VDO0_LARB0_MMSRAM 13 
+#define CLK_VPP0_GALS_VDO0_LARB1_MMSRAM 14 +#define CLK_VPP0_GALS_VENCSYS_MMSRAM 15 +#define CLK_VPP0_GALS_VENCSYS_CORE1_MMSRAM 16 +#define CLK_VPP0_GALS_INFRA_MMSRAM 17 +#define CLK_VPP0_GALS_CAMSYS_MMSRAM 18 +#define CLK_VPP0_GALS_VPP1_LARB5_MMSRAM 19 +#define CLK_VPP0_GALS_VPP1_LARB6_MMSRAM 20 +#define CLK_VPP0_SMI_REORDER_MMSRAM 21 +#define CLK_VPP0_SMI_IOMMU 22 +#define CLK_VPP0_GALS_IMGSYS_CAMSYS 23 +#define CLK_VPP0_MDP_RDMA 24 +#define CLK_VPP0_MDP_WROT 25 +#define CLK_VPP0_GALS_EMI0_EMI1 26 +#define CLK_VPP0_SMI_SUB_COMMON_REORDER 27 +#define CLK_VPP0_SMI_RSI 28 +#define CLK_VPP0_SMI_COMMON_LARB4 29 +#define CLK_VPP0_GALS_VDEC_VDEC_CORE1 30 +#define CLK_VPP0_GALS_VPP1_WPESYS 31 +#define CLK_VPP0_GALS_VDO0_VDO1_VENCSYS_CORE1 32 +#define CLK_VPP0_FAKE_ENG 33 +#define CLK_VPP0_MDP_HDR 34 +#define CLK_VPP0_MDP_TDSHP 35 +#define CLK_VPP0_MDP_COLOR 36 +#define CLK_VPP0_MDP_OVL 37 +#define CLK_VPP0_DSIP_RDMA 38 +#define CLK_VPP0_DISP_WDMA 39 +#define CLK_VPP0_MDP_HMS 40 +#define CLK_VPP0_WARP0_RELAY 41 +#define CLK_VPP0_WARP0_ASYNC 42 +#define CLK_VPP0_WARP1_RELAY 43 +#define CLK_VPP0_WARP1_ASYNC 44 +#define CLK_VPP0_NR_CLK 45 + +/* WPESYS */ +#define CLK_WPE_TOP_WPE_VPP0 0 +#define CLK_WPE_TOP_SMI_LARB7 1 +#define CLK_WPE_TOP_WPESYS_EVENT_TX 2 +#define CLK_WPE_TOP_SMI_LARB7_PCLK_EN 3 +#define CLK_WPE_TOP_NR_CLK 4 + +/* WPESYS_VPP0 */ +#define CLK_WPE_VPP0_VECI 0 +#define CLK_WPE_VPP0_VEC2I 1 +#define CLK_WPE_VPP0_VEC3I 2 +#define CLK_WPE_VPP0_WPEO 3 +#define CLK_WPE_VPP0_MSKO 4 +#define CLK_WPE_VPP0_VGEN 5 +#define CLK_WPE_VPP0_EXT 6 +#define CLK_WPE_VPP0_VFC 7 +#define CLK_WPE_VPP0_CACH0_TOP 8 +#define CLK_WPE_VPP0_CACH0_DMA 9 +#define CLK_WPE_VPP0_CACH1_TOP 10 +#define CLK_WPE_VPP0_CACH1_DMA 11 +#define CLK_WPE_VPP0_CACH2_TOP 12 +#define CLK_WPE_VPP0_CACH2_DMA 13 +#define CLK_WPE_VPP0_CACH3_TOP 14 +#define CLK_WPE_VPP0_CACH3_DMA 15 +#define CLK_WPE_VPP0_PSP 16 +#define CLK_WPE_VPP0_PSP2 17 +#define CLK_WPE_VPP0_SYNC 18 +#define CLK_WPE_VPP0_C24 19 +#define 
CLK_WPE_VPP0_MDP_CROP 20 +#define CLK_WPE_VPP0_ISP_CROP 21 +#define CLK_WPE_VPP0_TOP 22 +#define CLK_WPE_VPP0_NR_CLK 23 + +/* VPPSYS1 */ +#define CLK_VPP1_SVPP1_MDP_OVL 0 +#define CLK_VPP1_SVPP1_MDP_TCC 1 +#define CLK_VPP1_SVPP1_MDP_WROT 2 +#define CLK_VPP1_SVPP1_VPP_PAD 3 +#define CLK_VPP1_SVPP2_MDP_WROT 4 +#define CLK_VPP1_SVPP2_VPP_PAD 5 +#define CLK_VPP1_SVPP3_MDP_WROT 6 +#define CLK_VPP1_SVPP3_VPP_PAD 7 +#define CLK_VPP1_SVPP1_MDP_RDMA 8 +#define CLK_VPP1_SVPP1_MDP_FG 9 +#define CLK_VPP1_SVPP2_MDP_RDMA 10 +#define CLK_VPP1_SVPP2_MDP_FG 11 +#define CLK_VPP1_SVPP3_MDP_RDMA 12 +#define CLK_VPP1_SVPP3_MDP_FG 13 +#define CLK_VPP1_VPP_SPLIT 14 +#define CLK_VPP1_SVPP2_VDO0_DL_RELAY 15 +#define CLK_VPP1_SVPP1_MDP_RSZ 16 +#define CLK_VPP1_SVPP1_MDP_TDSHP 17 +#define CLK_VPP1_SVPP1_MDP_COLOR 18 +#define CLK_VPP1_SVPP3_VDO1_DL_RELAY 19 +#define CLK_VPP1_SVPP2_MDP_RSZ 20 +#define CLK_VPP1_SVPP2_VPP_MERGE 21 +#define CLK_VPP1_SVPP2_MDP_TDSHP 22 +#define CLK_VPP1_SVPP2_MDP_COLOR 23 +#define CLK_VPP1_SVPP3_MDP_RSZ 24 +#define CLK_VPP1_SVPP3_VPP_MERGE 25 +#define CLK_VPP1_SVPP3_MDP_TDSHP 26 +#define CLK_VPP1_SVPP3_MDP_COLOR 27 +#define CLK_VPP1_GALS5 28 +#define CLK_VPP1_GALS6 29 +#define CLK_VPP1_LARB5 30 +#define CLK_VPP1_LARB6 31 +#define CLK_VPP1_SVPP1_MDP_HDR 32 +#define CLK_VPP1_SVPP1_MDP_AAL 33 +#define CLK_VPP1_SVPP2_MDP_HDR 34 +#define CLK_VPP1_SVPP2_MDP_AAL 35 +#define CLK_VPP1_SVPP3_MDP_HDR 36 +#define CLK_VPP1_SVPP3_MDP_AAL 37 +#define CLK_VPP1_DISP_MUTEX 38 +#define CLK_VPP1_SVPP2_VDO1_DL_RELAY 39 +#define CLK_VPP1_SVPP3_VDO0_DL_RELAY 40 +#define CLK_VPP1_VPP0_DL_ASYNC 41 +#define CLK_VPP1_VPP0_DL1_RELAY 42 +#define CLK_VPP1_LARB5_FAKE_ENG 43 +#define CLK_VPP1_LARB6_FAKE_ENG 44 +#define CLK_VPP1_HDMI_META 45 +#define CLK_VPP1_VPP_SPLIT_HDMI 46 +#define CLK_VPP1_DGI_IN 47 +#define CLK_VPP1_DGI_OUT 48 +#define CLK_VPP1_VPP_SPLIT_DGI 49 +#define CLK_VPP1_DL_CON_OCC 50 +#define CLK_VPP1_VPP_SPLIT_26M 51 +#define CLK_VPP1_NR_CLK 52 + +/* IMGSYS */ +#define 
CLK_IMGSYS_MAIN_LARB9 0 +#define CLK_IMGSYS_MAIN_TRAW0 1 +#define CLK_IMGSYS_MAIN_TRAW1 2 +#define CLK_IMGSYS_MAIN_VCORE_GALS 3 +#define CLK_IMGSYS_MAIN_DIP0 4 +#define CLK_IMGSYS_MAIN_WPE0 5 +#define CLK_IMGSYS_MAIN_IPE 6 +#define CLK_IMGSYS_MAIN_WPE1 7 +#define CLK_IMGSYS_MAIN_WPE2 8 +#define CLK_IMGSYS_MAIN_GALS 9 +#define CLK_IMGSYS_MAIN_NR_CLK 10 + +/* IMGSYS1_DIP_TOP */ +#define CLK_IMGSYS1_DIP_TOP_LARB10 0 +#define CLK_IMGSYS1_DIP_TOP_DIP_TOP 1 +#define CLK_IMGSYS1_DIP_TOP_NR_CLK 2 + +/* IMGSYS1_DIP_NR */ +#define CLK_IMGSYS1_DIP_NR_LARB15 0 +#define CLK_IMGSYS1_DIP_NR_DIP_NR 1 +#define CLK_IMGSYS1_DIP_NR_NR_CLK 2 + +/* IMGSYS_WPE1 */ +#define CLK_IMGSYS_WPE1_LARB11 0 +#define CLK_IMGSYS_WPE1 1 +#define CLK_IMGSYS_WPE1_NR_CLK 2 + +/* IPESYS */ +#define CLK_IPE_DPE 0 +#define CLK_IPE_FDVT 1 +#define CLK_IPE_ME 2 +#define CLK_IPESYS_TOP 3 +#define CLK_IPE_SMI_LARB12 4 +#define CLK_IPE_NR_CLK 5 + +/* IMGSYS_WPE2 */ +#define CLK_IMGSYS_WPE2_LARB11 0 +#define CLK_IMGSYS_WPE2 1 +#define CLK_IMGSYS_WPE2_NR_CLK 2 + +/* IMGSYS_WPE3 */ +#define CLK_IMGSYS_WPE3_LARB11 0 +#define CLK_IMGSYS_WPE3 1 +#define CLK_IMGSYS_WPE3_NR_CLK 2 + +/* CAMSYS */ +#define CLK_CAM_MAIN_LARB13 0 +#define CLK_CAM_MAIN_LARB14 1 +#define CLK_CAM_MAIN_CAM 2 +#define CLK_CAM_MAIN_CAM_SUBA 3 +#define CLK_CAM_MAIN_CAM_SUBB 4 +#define CLK_CAM_MAIN_CAMTG 5 +#define CLK_CAM_MAIN_SENINF 6 +#define CLK_CAM_MAIN_GCAMSVA 7 +#define CLK_CAM_MAIN_GCAMSVB 8 +#define CLK_CAM_MAIN_GCAMSVC 9 +#define CLK_CAM_MAIN_GCAMSVD 10 +#define CLK_CAM_MAIN_GCAMSVE 11 +#define CLK_CAM_MAIN_GCAMSVF 12 +#define CLK_CAM_MAIN_GCAMSVG 13 +#define CLK_CAM_MAIN_GCAMSVH 14 +#define CLK_CAM_MAIN_GCAMSVI 15 +#define CLK_CAM_MAIN_GCAMSVJ 16 +#define CLK_CAM_MAIN_CAMSV_TOP 17 +#define CLK_CAM_MAIN_CAMSV_CQ_A 18 +#define CLK_CAM_MAIN_CAMSV_CQ_B 19 +#define CLK_CAM_MAIN_CAMSV_CQ_C 20 +#define CLK_CAM_MAIN_FAKE_ENG 21 +#define CLK_CAM_MAIN_CAM2MM0_GALS 22 +#define CLK_CAM_MAIN_CAM2MM1_GALS 23 +#define CLK_CAM_MAIN_CAM2SYS_GALS 24 
+#define CLK_CAM_MAIN_NR_CLK 25 + +/* CAMSYS_RAWA */ +#define CLK_CAM_RAWA_LARBX 0 +#define CLK_CAM_RAWA_CAM 1 +#define CLK_CAM_RAWA_CAMTG 2 +#define CLK_CAM_RAWA_NR_CLK 3 + +/* CAMSYS_YUVA */ +#define CLK_CAM_YUVA_LARBX 0 +#define CLK_CAM_YUVA_CAM 1 +#define CLK_CAM_YUVA_CAMTG 2 +#define CLK_CAM_YUVA_NR_CLK 3 + +/* CAMSYS_RAWB */ +#define CLK_CAM_RAWB_LARBX 0 +#define CLK_CAM_RAWB_CAM 1 +#define CLK_CAM_RAWB_CAMTG 2 +#define CLK_CAM_RAWB_NR_CLK 3 + +/* CAMSYS_YUVB */ +#define CLK_CAM_YUVB_LARBX 0 +#define CLK_CAM_YUVB_CAM 1 +#define CLK_CAM_YUVB_CAMTG 2 +#define CLK_CAM_YUVB_NR_CLK 3 + +/* CCUSYS */ +#define CLK_CCU_LARB27 0 +#define CLK_CCU_AHB 1 +#define CLK_CCU_CCU0 2 +#define CLK_CCU_NR_CLK 3 + +/* VDECSYS_SOC */ +#define CLK_VDE1_SOC_LARB1 0 +#define CLK_VDE1_SOC_LAT 1 +#define CLK_VDE1_SOC_LAT_ACTIVE 2 +#define CLK_VDE1_SOC_LAT_ENG 3 +#define CLK_VDE1_SOC_VDEC 4 +#define CLK_VDE1_SOC_VDEC_ACTIVE 5 +#define CLK_VDE1_SOC_VDEC_ENG 6 +#define CLK_VDE1_NR_CLK 7 + +/* VDECSYS */ +#define CLK_VDE2_LARB1 0 +#define CLK_VDE2_LAT 1 +#define CLK_VDE2_VDEC 2 +#define CLK_VDE2_VDEC_ACTIVE 3 +#define CLK_VDE2_VDEC_ENG 4 +#define CLK_VDE2_NR_CLK 5 + +/* VENCSYS */ +#define CLK_VEN1_CKE0_LARB 0 +#define CLK_VEN1_CKE1_VENC 1 +#define CLK_VEN1_CKE2_JPGENC 2 +#define CLK_VEN1_CKE3_JPGDEC 3 +#define CLK_VEN1_CKE4_JPGDEC_C1 4 +#define CLK_VEN1_CKE5_GALS 5 +#define CLK_VEN1_CKE6_GALS_SRAM 6 +#define CLK_VEN1_NR_CLK 7 + +/* VDOSYS0 */ +#define CLK_VDO0_DISP_OVL0 0 +#define CLK_VDO0_FAKE_ENG0 1 +#define CLK_VDO0_DISP_CCORR0 2 +#define CLK_VDO0_DISP_MUTEX0 3 +#define CLK_VDO0_DISP_GAMMA0 4 +#define CLK_VDO0_DISP_DITHER0 5 +#define CLK_VDO0_DISP_WDMA0 6 +#define CLK_VDO0_DISP_RDMA0 7 +#define CLK_VDO0_DSI0 8 +#define CLK_VDO0_DSI1 9 +#define CLK_VDO0_DSC_WRAP0 10 +#define CLK_VDO0_VPP_MERGE0 11 +#define CLK_VDO0_DP_INTF0 12 +#define CLK_VDO0_DISP_AAL0 13 +#define CLK_VDO0_INLINEROT0 14 +#define CLK_VDO0_APB_BUS 15 +#define CLK_VDO0_DISP_COLOR0 16 +#define CLK_VDO0_MDP_WROT0 17 
+#define CLK_VDO0_DISP_RSZ0 18 +#define CLK_VDO0_DISP_POSTMASK0 19 +#define CLK_VDO0_FAKE_ENG1 20 +#define CLK_VDO0_DL_ASYNC2 21 +#define CLK_VDO0_DL_RELAY3 22 +#define CLK_VDO0_DL_RELAY4 23 +#define CLK_VDO0_SMI_GALS 24 +#define CLK_VDO0_SMI_COMMON 25 +#define CLK_VDO0_SMI_EMI 26 +#define CLK_VDO0_SMI_IOMMU 27 +#define CLK_VDO0_SMI_LARB 28 +#define CLK_VDO0_SMI_RSI 29 +#define CLK_VDO0_DSI0_DSI 30 +#define CLK_VDO0_DSI1_DSI 31 +#define CLK_VDO0_DP_INTF0_DP_INTF 32 +#define CLK_VDO0_NR_CLK 33 + +/* VDOSYS1 */ +#define CLK_VDO1_SMI_LARB2 0 +#define CLK_VDO1_SMI_LARB3 1 +#define CLK_VDO1_GALS 2 +#define CLK_VDO1_FAKE_ENG0 3 +#define CLK_VDO1_FAKE_ENG1 4 +#define CLK_VDO1_MDP_RDMA0 5 +#define CLK_VDO1_MDP_RDMA1 6 +#define CLK_VDO1_MDP_RDMA2 7 +#define CLK_VDO1_MDP_RDMA3 8 +#define CLK_VDO1_VPP_MERGE0 9 +#define CLK_VDO1_VPP_MERGE1 10 +#define CLK_VDO1_VPP_MERGE2 11 +#define CLK_VDO1_VPP_MERGE3 12 +#define CLK_VDO1_VPP_MERGE4 13 +#define CLK_VDO1_VPP2_TO_VDO1_DL_ASYNC 14 +#define CLK_VDO1_VPP3_TO_VDO1_DL_ASYNC 15 +#define CLK_VDO1_DISP_MUTEX 16 +#define CLK_VDO1_MDP_RDMA4 17 +#define CLK_VDO1_MDP_RDMA5 18 +#define CLK_VDO1_MDP_RDMA6 19 +#define CLK_VDO1_MDP_RDMA7 20 +#define CLK_VDO1_DP_INTF0_MMCK 21 +#define CLK_VDO1_DPI0_MM 22 +#define CLK_VDO1_DPI1_MM 23 +#define CLK_VDO1_MERGE0_DL_ASYNC 24 +#define CLK_VDO1_MERGE1_DL_ASYNC 25 +#define CLK_VDO1_MERGE2_DL_ASYNC 26 +#define CLK_VDO1_MERGE3_DL_ASYNC 27 +#define CLK_VDO1_MERGE4_DL_ASYNC 28 +#define CLK_VDO1_DSC_VDO1_DL_ASYNC 29 +#define CLK_VDO1_MERGE_VDO1_DL_ASYNC 30 +#define CLK_VDO1_PADDING0 31 +#define CLK_VDO1_PADDING1 32 +#define CLK_VDO1_PADDING2 33 +#define CLK_VDO1_PADDING3 34 +#define CLK_VDO1_PADDING4 35 +#define CLK_VDO1_PADDING5 36 +#define CLK_VDO1_PADDING6 37 +#define CLK_VDO1_PADDING7 38 +#define CLK_VDO1_DISP_RSZ0 39 +#define CLK_VDO1_DISP_RSZ1 40 +#define CLK_VDO1_DISP_RSZ2 41 +#define CLK_VDO1_DISP_RSZ3 42 +#define CLK_VDO1_HDR_VDO_FE0 43 +#define CLK_VDO1_HDR_GFX_FE0 44 +#define CLK_VDO1_HDR_VDO_BE 
45 +#define CLK_VDO1_HDR_VDO_FE1 46 +#define CLK_VDO1_HDR_GFX_FE1 47 +#define CLK_VDO1_DISP_MIXER 48 +#define CLK_VDO1_HDR_VDO_FE0_DL_ASYNC 49 +#define CLK_VDO1_HDR_VDO_FE1_DL_ASYNC 50 +#define CLK_VDO1_HDR_GFX_FE0_DL_ASYNC 51 +#define CLK_VDO1_HDR_GFX_FE1_DL_ASYNC 52 +#define CLK_VDO1_HDR_VDO_BE_DL_ASYNC 53 +#define CLK_VDO1_DPI0 54 +#define CLK_VDO1_DISP_MONITOR_DPI0 55 +#define CLK_VDO1_DPI1 56 +#define CLK_VDO1_DISP_MONITOR_DPI1 57 +#define CLK_VDO1_DPINTF 58 +#define CLK_VDO1_DISP_MONITOR_DPINTF 59 +#define CLK_VDO1_26M_SLOW 60 +#define CLK_VDO1_DPI1_HDMI 61 +#define CLK_VDO1_NR_CLK 62 + +#endif /* _DT_BINDINGS_CLK_MT8188_H */ diff --git a/contrib/genio/data/linux-ccf/mt8195-clk.h b/contrib/genio/data/linux-ccf/mt8195-clk.h new file mode 100755 index 0000000000..449f3498a7 --- /dev/null +++ b/contrib/genio/data/linux-ccf/mt8195-clk.h @@ -0,0 +1,864 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Copyright (c) 2021 MediaTek Inc. + * Author: Chun-Jie Chen + */ + +#ifndef _DT_BINDINGS_CLK_MT8195_H +#define _DT_BINDINGS_CLK_MT8195_H + +/* TOPCKGEN */ + +#define CLK_TOP_AXI 0 +#define CLK_TOP_SPM 1 +#define CLK_TOP_SCP 2 +#define CLK_TOP_BUS_AXIMEM 3 +#define CLK_TOP_VPP 4 +#define CLK_TOP_ETHDR 5 +#define CLK_TOP_IPE 6 +#define CLK_TOP_CAM 7 +#define CLK_TOP_CCU 8 +#define CLK_TOP_IMG 9 +#define CLK_TOP_CAMTM 10 +#define CLK_TOP_DSP 11 +#define CLK_TOP_DSP1 12 +#define CLK_TOP_DSP2 13 +#define CLK_TOP_DSP3 14 +#define CLK_TOP_DSP4 15 +#define CLK_TOP_DSP5 16 +#define CLK_TOP_DSP6 17 +#define CLK_TOP_DSP7 18 +#define CLK_TOP_IPU_IF 19 +#define CLK_TOP_MFG_CORE_TMP 20 +#define CLK_TOP_CAMTG 21 +#define CLK_TOP_CAMTG2 22 +#define CLK_TOP_CAMTG3 23 +#define CLK_TOP_CAMTG4 24 +#define CLK_TOP_CAMTG5 25 +#define CLK_TOP_UART 26 +#define CLK_TOP_SPI 27 +#define CLK_TOP_SPIS 28 +#define CLK_TOP_MSDC50_0_HCLK 29 +#define CLK_TOP_MSDC50_0 30 +#define CLK_TOP_MSDC30_1 31 +#define CLK_TOP_MSDC30_2 32 +#define CLK_TOP_INTDIR 33 +#define CLK_TOP_AUD_INTBUS 34 
+#define CLK_TOP_AUDIO_H 35 +#define CLK_TOP_PWRAP_ULPOSC 36 +#define CLK_TOP_ATB 37 +#define CLK_TOP_PWRMCU 38 +#define CLK_TOP_DP 39 +#define CLK_TOP_EDP 40 +#define CLK_TOP_DPI 41 +#define CLK_TOP_DISP_PWM0 42 +#define CLK_TOP_DISP_PWM1 43 +#define CLK_TOP_USB_TOP 44 +#define CLK_TOP_SSUSB_XHCI 45 +#define CLK_TOP_USB_TOP_1P 46 +#define CLK_TOP_SSUSB_XHCI_1P 47 +#define CLK_TOP_USB_TOP_2P 48 +#define CLK_TOP_SSUSB_XHCI_2P 49 +#define CLK_TOP_USB_TOP_3P 50 +#define CLK_TOP_SSUSB_XHCI_3P 51 +#define CLK_TOP_I2C 52 +#define CLK_TOP_SENINF 53 +#define CLK_TOP_SENINF1 54 +#define CLK_TOP_SENINF2 55 +#define CLK_TOP_SENINF3 56 +#define CLK_TOP_GCPU 57 +#define CLK_TOP_DXCC 58 +#define CLK_TOP_DPMAIF_MAIN 59 +#define CLK_TOP_AES_UFSFDE 60 +#define CLK_TOP_UFS 61 +#define CLK_TOP_UFS_TICK1US 62 +#define CLK_TOP_UFS_MP_SAP_CFG 63 +#define CLK_TOP_VENC 64 +#define CLK_TOP_VDEC 65 +#define CLK_TOP_PWM 66 +#define CLK_TOP_MCUPM 67 +#define CLK_TOP_SPMI_P_MST 68 +#define CLK_TOP_SPMI_M_MST 69 +#define CLK_TOP_DVFSRC 70 +#define CLK_TOP_TL 71 +#define CLK_TOP_TL_P1 72 +#define CLK_TOP_AES_MSDCFDE 73 +#define CLK_TOP_DSI_OCC 74 +#define CLK_TOP_WPE_VPP 75 +#define CLK_TOP_HDCP 76 +#define CLK_TOP_HDCP_24M 77 +#define CLK_TOP_HD20_DACR_REF_CLK 78 +#define CLK_TOP_HD20_HDCP_CCLK 79 +#define CLK_TOP_HDMI_XTAL 80 +#define CLK_TOP_HDMI_APB 81 +#define CLK_TOP_SNPS_ETH_250M 82 +#define CLK_TOP_SNPS_ETH_62P4M_PTP 83 +#define CLK_TOP_SNPS_ETH_50M_RMII 84 +#define CLK_TOP_DGI_OUT 85 +#define CLK_TOP_NNA0 86 +#define CLK_TOP_NNA1 87 +#define CLK_TOP_ADSP 88 +#define CLK_TOP_ASM_H 89 +#define CLK_TOP_ASM_M 90 +#define CLK_TOP_ASM_L 91 +#define CLK_TOP_APLL1 92 +#define CLK_TOP_APLL2 93 +#define CLK_TOP_APLL3 94 +#define CLK_TOP_APLL4 95 +#define CLK_TOP_APLL5 96 +#define CLK_TOP_I2SO1_MCK 97 +#define CLK_TOP_I2SO2_MCK 98 +#define CLK_TOP_I2SI1_MCK 99 +#define CLK_TOP_I2SI2_MCK 100 +#define CLK_TOP_DPTX_MCK 101 +#define CLK_TOP_AUD_IEC_CLK 102 +#define CLK_TOP_A1SYS_HP 103 +#define 
CLK_TOP_A2SYS_HF 104 +#define CLK_TOP_A3SYS_HF 105 +#define CLK_TOP_A4SYS_HF 106 +#define CLK_TOP_SPINFI_BCLK 107 +#define CLK_TOP_NFI1X 108 +#define CLK_TOP_ECC 109 +#define CLK_TOP_AUDIO_LOCAL_BUS 110 +#define CLK_TOP_SPINOR 111 +#define CLK_TOP_DVIO_DGI_REF 112 +#define CLK_TOP_ULPOSC 113 +#define CLK_TOP_ULPOSC_CORE 114 +#define CLK_TOP_SRCK 115 +#define CLK_TOP_MFG_CK_FAST_REF 116 +#define CLK_TOP_CLK26M_D2 117 +#define CLK_TOP_CLK26M_D52 118 +#define CLK_TOP_IN_DGI 119 +#define CLK_TOP_IN_DGI_D2 120 +#define CLK_TOP_IN_DGI_D4 121 +#define CLK_TOP_IN_DGI_D6 122 +#define CLK_TOP_IN_DGI_D8 123 +#define CLK_TOP_MAINPLL_D3 124 +#define CLK_TOP_MAINPLL_D4 125 +#define CLK_TOP_MAINPLL_D4_D2 126 +#define CLK_TOP_MAINPLL_D4_D4 127 +#define CLK_TOP_MAINPLL_D4_D8 128 +#define CLK_TOP_MAINPLL_D5 129 +#define CLK_TOP_MAINPLL_D5_D2 130 +#define CLK_TOP_MAINPLL_D5_D4 131 +#define CLK_TOP_MAINPLL_D5_D8 132 +#define CLK_TOP_MAINPLL_D6 133 +#define CLK_TOP_MAINPLL_D6_D2 134 +#define CLK_TOP_MAINPLL_D6_D4 135 +#define CLK_TOP_MAINPLL_D6_D8 136 +#define CLK_TOP_MAINPLL_D7 137 +#define CLK_TOP_MAINPLL_D7_D2 138 +#define CLK_TOP_MAINPLL_D7_D4 139 +#define CLK_TOP_MAINPLL_D7_D8 140 +#define CLK_TOP_MAINPLL_D9 141 +#define CLK_TOP_UNIVPLL_D2 142 +#define CLK_TOP_UNIVPLL_D3 143 +#define CLK_TOP_UNIVPLL_D4 144 +#define CLK_TOP_UNIVPLL_D4_D2 145 +#define CLK_TOP_UNIVPLL_D4_D4 146 +#define CLK_TOP_UNIVPLL_D4_D8 147 +#define CLK_TOP_UNIVPLL_D5 148 +#define CLK_TOP_UNIVPLL_D5_D2 149 +#define CLK_TOP_UNIVPLL_D5_D4 150 +#define CLK_TOP_UNIVPLL_D5_D8 151 +#define CLK_TOP_UNIVPLL_D6 152 +#define CLK_TOP_UNIVPLL_D6_D2 153 +#define CLK_TOP_UNIVPLL_D6_D4 154 +#define CLK_TOP_UNIVPLL_D6_D8 155 +#define CLK_TOP_UNIVPLL_D6_D16 156 +#define CLK_TOP_UNIVPLL_D7 157 +#define CLK_TOP_UNIVPLL_192M 158 +#define CLK_TOP_UNIVPLL_192M_D4 159 +#define CLK_TOP_UNIVPLL_192M_D8 160 +#define CLK_TOP_UNIVPLL_192M_D16 161 +#define CLK_TOP_UNIVPLL_192M_D32 162 +#define CLK_TOP_APLL1_D3 163 +#define CLK_TOP_APLL1_D4 
164 +#define CLK_TOP_APLL2_D3 165 +#define CLK_TOP_APLL2_D4 166 +#define CLK_TOP_APLL3_D4 167 +#define CLK_TOP_APLL4_D4 168 +#define CLK_TOP_APLL5_D4 169 +#define CLK_TOP_HDMIRX_APLL_D3 170 +#define CLK_TOP_HDMIRX_APLL_D4 171 +#define CLK_TOP_HDMIRX_APLL_D6 172 +#define CLK_TOP_MMPLL_D4 173 +#define CLK_TOP_MMPLL_D4_D2 174 +#define CLK_TOP_MMPLL_D4_D4 175 +#define CLK_TOP_MMPLL_D5 176 +#define CLK_TOP_MMPLL_D5_D2 177 +#define CLK_TOP_MMPLL_D5_D4 178 +#define CLK_TOP_MMPLL_D6 179 +#define CLK_TOP_MMPLL_D6_D2 180 +#define CLK_TOP_MMPLL_D7 181 +#define CLK_TOP_MMPLL_D9 182 +#define CLK_TOP_TVDPLL1_D2 183 +#define CLK_TOP_TVDPLL1_D4 184 +#define CLK_TOP_TVDPLL1_D8 185 +#define CLK_TOP_TVDPLL1_D16 186 +#define CLK_TOP_TVDPLL2_D2 187 +#define CLK_TOP_TVDPLL2_D4 188 +#define CLK_TOP_TVDPLL2_D8 189 +#define CLK_TOP_TVDPLL2_D16 190 +#define CLK_TOP_MSDCPLL_D2 191 +#define CLK_TOP_MSDCPLL_D4 192 +#define CLK_TOP_MSDCPLL_D16 193 +#define CLK_TOP_ETHPLL_D2 194 +#define CLK_TOP_ETHPLL_D8 195 +#define CLK_TOP_ETHPLL_D10 196 +#define CLK_TOP_DGIPLL_D2 197 +#define CLK_TOP_ULPOSC1 198 +#define CLK_TOP_ULPOSC1_D2 199 +#define CLK_TOP_ULPOSC1_D4 200 +#define CLK_TOP_ULPOSC1_D7 201 +#define CLK_TOP_ULPOSC1_D8 202 +#define CLK_TOP_ULPOSC1_D10 203 +#define CLK_TOP_ULPOSC1_D16 204 +#define CLK_TOP_ULPOSC2 205 +#define CLK_TOP_ADSPPLL_D2 206 +#define CLK_TOP_ADSPPLL_D4 207 +#define CLK_TOP_ADSPPLL_D8 208 +#define CLK_TOP_MEM_466M 209 +#define CLK_TOP_MPHONE_SLAVE_B 210 +#define CLK_TOP_PEXTP_PIPE 211 +#define CLK_TOP_UFS_RX_SYMBOL 212 +#define CLK_TOP_UFS_TX_SYMBOL 213 +#define CLK_TOP_SSUSB_U3PHY_P1_P_P0 214 +#define CLK_TOP_UFS_RX_SYMBOL1 215 +#define CLK_TOP_FPC 216 +#define CLK_TOP_HDMIRX_P 217 +#define CLK_TOP_APLL12_DIV0 218 +#define CLK_TOP_APLL12_DIV1 219 +#define CLK_TOP_APLL12_DIV2 220 +#define CLK_TOP_APLL12_DIV3 221 +#define CLK_TOP_APLL12_DIV4 222 +#define CLK_TOP_APLL12_DIV9 223 +#define CLK_TOP_CFG_VPP0 224 +#define CLK_TOP_CFG_VPP1 225 +#define CLK_TOP_CFG_VDO0 226 
+#define CLK_TOP_CFG_VDO1 227 +#define CLK_TOP_CFG_UNIPLL_SES 228 +#define CLK_TOP_CFG_26M_VPP0 229 +#define CLK_TOP_CFG_26M_VPP1 230 +#define CLK_TOP_CFG_26M_AUD 231 +#define CLK_TOP_CFG_AXI_EAST 232 +#define CLK_TOP_CFG_AXI_EAST_NORTH 233 +#define CLK_TOP_CFG_AXI_NORTH 234 +#define CLK_TOP_CFG_AXI_SOUTH 235 +#define CLK_TOP_CFG_EXT_TEST 236 +#define CLK_TOP_SSUSB_REF 237 +#define CLK_TOP_SSUSB_PHY_REF 238 +#define CLK_TOP_SSUSB_P1_REF 239 +#define CLK_TOP_SSUSB_PHY_P1_REF 240 +#define CLK_TOP_SSUSB_P2_REF 241 +#define CLK_TOP_SSUSB_PHY_P2_REF 242 +#define CLK_TOP_SSUSB_P3_REF 243 +#define CLK_TOP_SSUSB_PHY_P3_REF 244 +#define CLK_TOP_NR_CLK 245 + +/* INFRACFG_AO */ + +#define CLK_INFRA_AO_PMIC_TMR 0 +#define CLK_INFRA_AO_PMIC_AP 1 +#define CLK_INFRA_AO_PMIC_MD 2 +#define CLK_INFRA_AO_PMIC_CONN 3 +#define CLK_INFRA_AO_SEJ 4 +#define CLK_INFRA_AO_APXGPT 5 +#define CLK_INFRA_AO_GCE 6 +#define CLK_INFRA_AO_GCE2 7 +#define CLK_INFRA_AO_THERM 8 +#define CLK_INFRA_AO_PWM_H 9 +#define CLK_INFRA_AO_PWM1 10 +#define CLK_INFRA_AO_PWM2 11 +#define CLK_INFRA_AO_PWM3 12 +#define CLK_INFRA_AO_PWM4 13 +#define CLK_INFRA_AO_PWM 14 +#define CLK_INFRA_AO_UART0 15 +#define CLK_INFRA_AO_UART1 16 +#define CLK_INFRA_AO_UART2 17 +#define CLK_INFRA_AO_UART3 18 +#define CLK_INFRA_AO_UART4 19 +#define CLK_INFRA_AO_GCE_26M 20 +#define CLK_INFRA_AO_CQ_DMA_FPC 21 +#define CLK_INFRA_AO_UART5 22 +#define CLK_INFRA_AO_HDMI_26M 23 +#define CLK_INFRA_AO_SPI0 24 +#define CLK_INFRA_AO_MSDC0 25 +#define CLK_INFRA_AO_MSDC1 26 +#define CLK_INFRA_AO_CG1_MSDC2 27 +#define CLK_INFRA_AO_MSDC0_SRC 28 +#define CLK_INFRA_AO_TRNG 29 +#define CLK_INFRA_AO_AUXADC 30 +#define CLK_INFRA_AO_CPUM 31 +#define CLK_INFRA_AO_HDMI_32K 32 +#define CLK_INFRA_AO_CEC_66M_H 33 +#define CLK_INFRA_AO_IRRX 34 +#define CLK_INFRA_AO_PCIE_TL_26M 35 +#define CLK_INFRA_AO_MSDC1_SRC 36 +#define CLK_INFRA_AO_CEC_66M_B 37 +#define CLK_INFRA_AO_PCIE_TL_96M 38 +#define CLK_INFRA_AO_DEVICE_APC 39 +#define CLK_INFRA_AO_ECC_66M_H 40 +#define 
CLK_INFRA_AO_DEBUGSYS 41 +#define CLK_INFRA_AO_AUDIO 42 +#define CLK_INFRA_AO_PCIE_TL_32K 43 +#define CLK_INFRA_AO_DBG_TRACE 44 +#define CLK_INFRA_AO_DRAMC_F26M 45 +#define CLK_INFRA_AO_IRTX 46 +#define CLK_INFRA_AO_SSUSB 47 +#define CLK_INFRA_AO_DISP_PWM 48 +#define CLK_INFRA_AO_CLDMA_B 49 +#define CLK_INFRA_AO_AUDIO_26M_B 50 +#define CLK_INFRA_AO_SPI1 51 +#define CLK_INFRA_AO_SPI2 52 +#define CLK_INFRA_AO_SPI3 53 +#define CLK_INFRA_AO_UNIPRO_SYS 54 +#define CLK_INFRA_AO_UNIPRO_TICK 55 +#define CLK_INFRA_AO_UFS_MP_SAP_B 56 +#define CLK_INFRA_AO_PWRMCU 57 +#define CLK_INFRA_AO_PWRMCU_BUS_H 58 +#define CLK_INFRA_AO_APDMA_B 59 +#define CLK_INFRA_AO_SPI4 60 +#define CLK_INFRA_AO_SPI5 61 +#define CLK_INFRA_AO_CQ_DMA 62 +#define CLK_INFRA_AO_AES_UFSFDE 63 +#define CLK_INFRA_AO_AES 64 +#define CLK_INFRA_AO_UFS_TICK 65 +#define CLK_INFRA_AO_SSUSB_XHCI 66 +#define CLK_INFRA_AO_MSDC0_SELF 67 +#define CLK_INFRA_AO_MSDC1_SELF 68 +#define CLK_INFRA_AO_MSDC2_SELF 69 +#define CLK_INFRA_AO_I2S_DMA 70 +#define CLK_INFRA_AO_AP_MSDC0 71 +#define CLK_INFRA_AO_MD_MSDC0 72 +#define CLK_INFRA_AO_CG3_MSDC2 73 +#define CLK_INFRA_AO_GCPU 74 +#define CLK_INFRA_AO_PCIE_PERI_26M 75 +#define CLK_INFRA_AO_GCPU_66M_B 76 +#define CLK_INFRA_AO_GCPU_133M_B 77 +#define CLK_INFRA_AO_DISP_PWM1 78 +#define CLK_INFRA_AO_FBIST2FPC 79 +#define CLK_INFRA_AO_DEVICE_APC_SYNC 80 +#define CLK_INFRA_AO_PCIE_P1_PERI_26M 81 +#define CLK_INFRA_AO_SPIS0 82 +#define CLK_INFRA_AO_SPIS1 83 +#define CLK_INFRA_AO_133M_M_PERI 84 +#define CLK_INFRA_AO_66M_M_PERI 85 +#define CLK_INFRA_AO_PCIE_PL_P_250M_P0 86 +#define CLK_INFRA_AO_PCIE_PL_P_250M_P1 87 +#define CLK_INFRA_AO_PCIE_P1_TL_96M 88 +#define CLK_INFRA_AO_AES_MSDCFDE_0P 89 +#define CLK_INFRA_AO_UFS_TX_SYMBOL 90 +#define CLK_INFRA_AO_UFS_RX_SYMBOL 91 +#define CLK_INFRA_AO_UFS_RX_SYMBOL1 92 +#define CLK_INFRA_AO_PERI_UFS_MEM_SUB 93 +#define CLK_INFRA_AO_NR_CLK 94 + +/* APMIXEDSYS */ + +#define CLK_APMIXED_NNAPLL 0 +#define CLK_APMIXED_RESPLL 1 +#define 
CLK_APMIXED_ETHPLL 2 +#define CLK_APMIXED_MSDCPLL 3 +#define CLK_APMIXED_TVDPLL1 4 +#define CLK_APMIXED_TVDPLL2 5 +#define CLK_APMIXED_MMPLL 6 +#define CLK_APMIXED_MAINPLL 7 +#define CLK_APMIXED_VDECPLL 8 +#define CLK_APMIXED_IMGPLL 9 +#define CLK_APMIXED_UNIVPLL 10 +#define CLK_APMIXED_HDMIPLL1 11 +#define CLK_APMIXED_HDMIPLL2 12 +#define CLK_APMIXED_HDMIRX_APLL 13 +#define CLK_APMIXED_USB1PLL 14 +#define CLK_APMIXED_ADSPPLL 15 +#define CLK_APMIXED_APLL1 16 +#define CLK_APMIXED_APLL2 17 +#define CLK_APMIXED_APLL3 18 +#define CLK_APMIXED_APLL4 19 +#define CLK_APMIXED_APLL5 20 +#define CLK_APMIXED_MFGPLL 21 +#define CLK_APMIXED_DGIPLL 22 +#define CLK_APMIXED_PLL_SSUSB26M 23 +#define CLK_APMIXED_NR_CLK 24 + +/* SCP_ADSP */ + +#define CLK_SCP_ADSP_AUDIODSP 0 +#define CLK_SCP_ADSP_NR_CLK 1 + +/* PERICFG_AO */ + +#define CLK_PERI_AO_ETHERNET 0 +#define CLK_PERI_AO_ETHERNET_BUS 1 +#define CLK_PERI_AO_FLASHIF_BUS 2 +#define CLK_PERI_AO_FLASHIF_FLASH 3 +#define CLK_PERI_AO_SSUSB_1P_BUS 4 +#define CLK_PERI_AO_SSUSB_1P_XHCI 5 +#define CLK_PERI_AO_SSUSB_2P_BUS 6 +#define CLK_PERI_AO_SSUSB_2P_XHCI 7 +#define CLK_PERI_AO_SSUSB_3P_BUS 8 +#define CLK_PERI_AO_SSUSB_3P_XHCI 9 +#define CLK_PERI_AO_SPINFI 10 +#define CLK_PERI_AO_ETHERNET_MAC 11 +#define CLK_PERI_AO_NFI_H 12 +#define CLK_PERI_AO_FNFI1X 13 +#define CLK_PERI_AO_PCIE_P0_MEM 14 +#define CLK_PERI_AO_PCIE_P1_MEM 15 +#define CLK_PERI_AO_NR_CLK 16 + +/* IMP_IIC_WRAP_S */ + +#define CLK_IMP_IIC_WRAP_S_I2C5 0 +#define CLK_IMP_IIC_WRAP_S_I2C6 1 +#define CLK_IMP_IIC_WRAP_S_I2C7 2 +#define CLK_IMP_IIC_WRAP_S_NR_CLK 3 + +/* IMP_IIC_WRAP_W */ + +#define CLK_IMP_IIC_WRAP_W_I2C0 0 +#define CLK_IMP_IIC_WRAP_W_I2C1 1 +#define CLK_IMP_IIC_WRAP_W_I2C2 2 +#define CLK_IMP_IIC_WRAP_W_I2C3 3 +#define CLK_IMP_IIC_WRAP_W_I2C4 4 +#define CLK_IMP_IIC_WRAP_W_NR_CLK 5 + +/* MFGCFG */ + +#define CLK_MFG_BG3D 0 +#define CLK_MFG_NR_CLK 1 + +/* VPPSYS0 */ + +#define CLK_VPP0_MDP_FG 0 +#define CLK_VPP0_STITCH 1 +#define CLK_VPP0_PADDING 2 +#define 
CLK_VPP0_MDP_TCC 3 +#define CLK_VPP0_WARP0_ASYNC_TX 4 +#define CLK_VPP0_WARP1_ASYNC_TX 5 +#define CLK_VPP0_MUTEX 6 +#define CLK_VPP0_VPP02VPP1_RELAY 7 +#define CLK_VPP0_VPP12VPP0_ASYNC 8 +#define CLK_VPP0_MMSYSRAM_TOP 9 +#define CLK_VPP0_MDP_AAL 10 +#define CLK_VPP0_MDP_RSZ 11 +#define CLK_VPP0_SMI_COMMON 12 +#define CLK_VPP0_GALS_VDO0_LARB0 13 +#define CLK_VPP0_GALS_VDO0_LARB1 14 +#define CLK_VPP0_GALS_VENCSYS 15 +#define CLK_VPP0_GALS_VENCSYS_CORE1 16 +#define CLK_VPP0_GALS_INFRA 17 +#define CLK_VPP0_GALS_CAMSYS 18 +#define CLK_VPP0_GALS_VPP1_LARB5 19 +#define CLK_VPP0_GALS_VPP1_LARB6 20 +#define CLK_VPP0_SMI_REORDER 21 +#define CLK_VPP0_SMI_IOMMU 22 +#define CLK_VPP0_GALS_IMGSYS_CAMSYS 23 +#define CLK_VPP0_MDP_RDMA 24 +#define CLK_VPP0_MDP_WROT 25 +#define CLK_VPP0_GALS_EMI0_EMI1 26 +#define CLK_VPP0_SMI_SUB_COMMON_REORDER 27 +#define CLK_VPP0_SMI_RSI 28 +#define CLK_VPP0_SMI_COMMON_LARB4 29 +#define CLK_VPP0_GALS_VDEC_VDEC_CORE1 30 +#define CLK_VPP0_GALS_VPP1_WPE 31 +#define CLK_VPP0_GALS_VDO0_VDO1_VENCSYS_CORE1 32 +#define CLK_VPP0_FAKE_ENG 33 +#define CLK_VPP0_MDP_HDR 34 +#define CLK_VPP0_MDP_TDSHP 35 +#define CLK_VPP0_MDP_COLOR 36 +#define CLK_VPP0_MDP_OVL 37 +#define CLK_VPP0_WARP0_RELAY 38 +#define CLK_VPP0_WARP0_MDP_DL_ASYNC 39 +#define CLK_VPP0_WARP1_RELAY 40 +#define CLK_VPP0_WARP1_MDP_DL_ASYNC 41 +#define CLK_VPP0_NR_CLK 42 + +/* WPESYS */ + +#define CLK_WPE_VPP0 0 +#define CLK_WPE_VPP1 1 +#define CLK_WPE_SMI_LARB7 2 +#define CLK_WPE_SMI_LARB8 3 +#define CLK_WPE_EVENT_TX 4 +#define CLK_WPE_SMI_LARB7_P 5 +#define CLK_WPE_SMI_LARB8_P 6 +#define CLK_WPE_NR_CLK 7 + +/* WPESYS_VPP0 */ + +#define CLK_WPE_VPP0_VECI 0 +#define CLK_WPE_VPP0_VEC2I 1 +#define CLK_WPE_VPP0_VEC3I 2 +#define CLK_WPE_VPP0_WPEO 3 +#define CLK_WPE_VPP0_MSKO 4 +#define CLK_WPE_VPP0_VGEN 5 +#define CLK_WPE_VPP0_EXT 6 +#define CLK_WPE_VPP0_VFC 7 +#define CLK_WPE_VPP0_CACH0_TOP 8 +#define CLK_WPE_VPP0_CACH0_DMA 9 +#define CLK_WPE_VPP0_CACH1_TOP 10 +#define CLK_WPE_VPP0_CACH1_DMA 11 
+#define CLK_WPE_VPP0_CACH2_TOP 12 +#define CLK_WPE_VPP0_CACH2_DMA 13 +#define CLK_WPE_VPP0_CACH3_TOP 14 +#define CLK_WPE_VPP0_CACH3_DMA 15 +#define CLK_WPE_VPP0_PSP 16 +#define CLK_WPE_VPP0_PSP2 17 +#define CLK_WPE_VPP0_SYNC 18 +#define CLK_WPE_VPP0_C24 19 +#define CLK_WPE_VPP0_MDP_CROP 20 +#define CLK_WPE_VPP0_ISP_CROP 21 +#define CLK_WPE_VPP0_TOP 22 +#define CLK_WPE_VPP0_NR_CLK 23 + +/* WPESYS_VPP1 */ + +#define CLK_WPE_VPP1_VECI 0 +#define CLK_WPE_VPP1_VEC2I 1 +#define CLK_WPE_VPP1_VEC3I 2 +#define CLK_WPE_VPP1_WPEO 3 +#define CLK_WPE_VPP1_MSKO 4 +#define CLK_WPE_VPP1_VGEN 5 +#define CLK_WPE_VPP1_EXT 6 +#define CLK_WPE_VPP1_VFC 7 +#define CLK_WPE_VPP1_CACH0_TOP 8 +#define CLK_WPE_VPP1_CACH0_DMA 9 +#define CLK_WPE_VPP1_CACH1_TOP 10 +#define CLK_WPE_VPP1_CACH1_DMA 11 +#define CLK_WPE_VPP1_CACH2_TOP 12 +#define CLK_WPE_VPP1_CACH2_DMA 13 +#define CLK_WPE_VPP1_CACH3_TOP 14 +#define CLK_WPE_VPP1_CACH3_DMA 15 +#define CLK_WPE_VPP1_PSP 16 +#define CLK_WPE_VPP1_PSP2 17 +#define CLK_WPE_VPP1_SYNC 18 +#define CLK_WPE_VPP1_C24 19 +#define CLK_WPE_VPP1_MDP_CROP 20 +#define CLK_WPE_VPP1_ISP_CROP 21 +#define CLK_WPE_VPP1_TOP 22 +#define CLK_WPE_VPP1_NR_CLK 23 + +/* VPPSYS1 */ + +#define CLK_VPP1_SVPP1_MDP_OVL 0 +#define CLK_VPP1_SVPP1_MDP_TCC 1 +#define CLK_VPP1_SVPP1_MDP_WROT 2 +#define CLK_VPP1_SVPP1_VPP_PAD 3 +#define CLK_VPP1_SVPP2_MDP_WROT 4 +#define CLK_VPP1_SVPP2_VPP_PAD 5 +#define CLK_VPP1_SVPP3_MDP_WROT 6 +#define CLK_VPP1_SVPP3_VPP_PAD 7 +#define CLK_VPP1_SVPP1_MDP_RDMA 8 +#define CLK_VPP1_SVPP1_MDP_FG 9 +#define CLK_VPP1_SVPP2_MDP_RDMA 10 +#define CLK_VPP1_SVPP2_MDP_FG 11 +#define CLK_VPP1_SVPP3_MDP_RDMA 12 +#define CLK_VPP1_SVPP3_MDP_FG 13 +#define CLK_VPP1_VPP_SPLIT 14 +#define CLK_VPP1_SVPP2_VDO0_DL_RELAY 15 +#define CLK_VPP1_SVPP1_MDP_TDSHP 16 +#define CLK_VPP1_SVPP1_MDP_COLOR 17 +#define CLK_VPP1_SVPP3_VDO1_DL_RELAY 18 +#define CLK_VPP1_SVPP2_VPP_MERGE 19 +#define CLK_VPP1_SVPP2_MDP_COLOR 20 +#define CLK_VPP1_VPPSYS1_GALS 21 +#define CLK_VPP1_SVPP3_VPP_MERGE 
22 +#define CLK_VPP1_SVPP3_MDP_COLOR 23 +#define CLK_VPP1_VPPSYS1_LARB 24 +#define CLK_VPP1_SVPP1_MDP_RSZ 25 +#define CLK_VPP1_SVPP1_MDP_HDR 26 +#define CLK_VPP1_SVPP1_MDP_AAL 27 +#define CLK_VPP1_SVPP2_MDP_HDR 28 +#define CLK_VPP1_SVPP2_MDP_AAL 29 +#define CLK_VPP1_DL_ASYNC 30 +#define CLK_VPP1_LARB5_FAKE_ENG 31 +#define CLK_VPP1_SVPP3_MDP_HDR 32 +#define CLK_VPP1_SVPP3_MDP_AAL 33 +#define CLK_VPP1_SVPP2_VDO1_DL_RELAY 34 +#define CLK_VPP1_LARB6_FAKE_ENG 35 +#define CLK_VPP1_SVPP2_MDP_RSZ 36 +#define CLK_VPP1_SVPP3_MDP_RSZ 37 +#define CLK_VPP1_SVPP3_VDO0_DL_RELAY 38 +#define CLK_VPP1_DISP_MUTEX 39 +#define CLK_VPP1_SVPP2_MDP_TDSHP 40 +#define CLK_VPP1_SVPP3_MDP_TDSHP 41 +#define CLK_VPP1_VPP0_DL1_RELAY 42 +#define CLK_VPP1_HDMI_META 43 +#define CLK_VPP1_VPP_SPLIT_HDMI 44 +#define CLK_VPP1_DGI_IN 45 +#define CLK_VPP1_DGI_OUT 46 +#define CLK_VPP1_VPP_SPLIT_DGI 47 +#define CLK_VPP1_VPP0_DL_ASYNC 48 +#define CLK_VPP1_VPP0_DL_RELAY 49 +#define CLK_VPP1_VPP_SPLIT_26M 50 +#define CLK_VPP1_NR_CLK 51 + +/* IMGSYS */ + +#define CLK_IMG_LARB9 0 +#define CLK_IMG_TRAW0 1 +#define CLK_IMG_TRAW1 2 +#define CLK_IMG_TRAW2 3 +#define CLK_IMG_TRAW3 4 +#define CLK_IMG_DIP0 5 +#define CLK_IMG_WPE0 6 +#define CLK_IMG_IPE 7 +#define CLK_IMG_DIP1 8 +#define CLK_IMG_WPE1 9 +#define CLK_IMG_GALS 10 +#define CLK_IMG_NR_CLK 11 + +/* IMGSYS1_DIP_TOP */ + +#define CLK_IMG1_DIP_TOP_LARB10 0 +#define CLK_IMG1_DIP_TOP_DIP_TOP 1 +#define CLK_IMG1_DIP_TOP_NR_CLK 2 + +/* IMGSYS1_DIP_NR */ + +#define CLK_IMG1_DIP_NR_RESERVE 0 +#define CLK_IMG1_DIP_NR_DIP_NR 1 +#define CLK_IMG1_DIP_NR_NR_CLK 2 + +/* IMGSYS1_WPE */ + +#define CLK_IMG1_WPE_LARB11 0 +#define CLK_IMG1_WPE_WPE 1 +#define CLK_IMG1_WPE_NR_CLK 2 + +/* IPESYS */ + +#define CLK_IPE_DPE 0 +#define CLK_IPE_FDVT 1 +#define CLK_IPE_ME 2 +#define CLK_IPE_TOP 3 +#define CLK_IPE_SMI_LARB12 4 +#define CLK_IPE_NR_CLK 5 + +/* CAMSYS */ + +#define CLK_CAM_LARB13 0 +#define CLK_CAM_LARB14 1 +#define CLK_CAM_MAIN_CAM 2 +#define CLK_CAM_MAIN_CAMTG 3 +#define 
CLK_CAM_SENINF 4 +#define CLK_CAM_GCAMSVA 5 +#define CLK_CAM_GCAMSVB 6 +#define CLK_CAM_GCAMSVC 7 +#define CLK_CAM_SCAMSA 8 +#define CLK_CAM_SCAMSB 9 +#define CLK_CAM_CAMSV_TOP 10 +#define CLK_CAM_CAMSV_CQ 11 +#define CLK_CAM_ADL 12 +#define CLK_CAM_ASG 13 +#define CLK_CAM_PDA 14 +#define CLK_CAM_FAKE_ENG 15 +#define CLK_CAM_MAIN_MRAW0 16 +#define CLK_CAM_MAIN_MRAW1 17 +#define CLK_CAM_MAIN_MRAW2 18 +#define CLK_CAM_MAIN_MRAW3 19 +#define CLK_CAM_CAM2MM0_GALS 20 +#define CLK_CAM_CAM2MM1_GALS 21 +#define CLK_CAM_CAM2SYS_GALS 22 +#define CLK_CAM_NR_CLK 23 + +/* CAMSYS_RAWA */ + +#define CLK_CAM_RAWA_LARBX 0 +#define CLK_CAM_RAWA_CAM 1 +#define CLK_CAM_RAWA_CAMTG 2 +#define CLK_CAM_RAWA_NR_CLK 3 + +/* CAMSYS_YUVA */ + +#define CLK_CAM_YUVA_LARBX 0 +#define CLK_CAM_YUVA_CAM 1 +#define CLK_CAM_YUVA_CAMTG 2 +#define CLK_CAM_YUVA_NR_CLK 3 + +/* CAMSYS_RAWB */ + +#define CLK_CAM_RAWB_LARBX 0 +#define CLK_CAM_RAWB_CAM 1 +#define CLK_CAM_RAWB_CAMTG 2 +#define CLK_CAM_RAWB_NR_CLK 3 + +/* CAMSYS_YUVB */ + +#define CLK_CAM_YUVB_LARBX 0 +#define CLK_CAM_YUVB_CAM 1 +#define CLK_CAM_YUVB_CAMTG 2 +#define CLK_CAM_YUVB_NR_CLK 3 + +/* CAMSYS_MRAW */ + +#define CLK_CAM_MRAW_LARBX 0 +#define CLK_CAM_MRAW_CAMTG 1 +#define CLK_CAM_MRAW_MRAW0 2 +#define CLK_CAM_MRAW_MRAW1 3 +#define CLK_CAM_MRAW_MRAW2 4 +#define CLK_CAM_MRAW_MRAW3 5 +#define CLK_CAM_MRAW_NR_CLK 6 + +/* CCUSYS */ + +#define CLK_CCU_LARB18 0 +#define CLK_CCU_AHB 1 +#define CLK_CCU_CCU0 2 +#define CLK_CCU_CCU1 3 +#define CLK_CCU_NR_CLK 4 + +/* VDECSYS_SOC */ + +#define CLK_VDEC_SOC_LARB1 0 +#define CLK_VDEC_SOC_LAT 1 +#define CLK_VDEC_SOC_VDEC 2 +#define CLK_VDEC_SOC_NR_CLK 3 + +/* VDECSYS */ + +#define CLK_VDEC_LARB1 0 +#define CLK_VDEC_LAT 1 +#define CLK_VDEC_VDEC 2 +#define CLK_VDEC_NR_CLK 3 + +/* VDECSYS_CORE1 */ + +#define CLK_VDEC_CORE1_LARB1 0 +#define CLK_VDEC_CORE1_LAT 1 +#define CLK_VDEC_CORE1_VDEC 2 +#define CLK_VDEC_CORE1_NR_CLK 3 + +/* APUSYS_PLL */ + +#define CLK_APUSYS_PLL_APUPLL 0 +#define 
CLK_APUSYS_PLL_NPUPLL 1 +#define CLK_APUSYS_PLL_APUPLL1 2 +#define CLK_APUSYS_PLL_APUPLL2 3 +#define CLK_APUSYS_PLL_NR_CLK 4 + +/* VENCSYS */ + +#define CLK_VENC_LARB 0 +#define CLK_VENC_VENC 1 +#define CLK_VENC_JPGENC 2 +#define CLK_VENC_JPGDEC 3 +#define CLK_VENC_JPGDEC_C1 4 +#define CLK_VENC_GALS 5 +#define CLK_VENC_NR_CLK 6 + +/* VENCSYS_CORE1 */ + +#define CLK_VENC_CORE1_LARB 0 +#define CLK_VENC_CORE1_VENC 1 +#define CLK_VENC_CORE1_JPGENC 2 +#define CLK_VENC_CORE1_JPGDEC 3 +#define CLK_VENC_CORE1_JPGDEC_C1 4 +#define CLK_VENC_CORE1_GALS 5 +#define CLK_VENC_CORE1_NR_CLK 6 + +/* VDOSYS0 */ + +#define CLK_VDO0_DISP_OVL0 0 +#define CLK_VDO0_DISP_COLOR0 1 +#define CLK_VDO0_DISP_COLOR1 2 +#define CLK_VDO0_DISP_CCORR0 3 +#define CLK_VDO0_DISP_CCORR1 4 +#define CLK_VDO0_DISP_AAL0 5 +#define CLK_VDO0_DISP_AAL1 6 +#define CLK_VDO0_DISP_GAMMA0 7 +#define CLK_VDO0_DISP_GAMMA1 8 +#define CLK_VDO0_DISP_DITHER0 9 +#define CLK_VDO0_DISP_DITHER1 10 +#define CLK_VDO0_DISP_OVL1 11 +#define CLK_VDO0_DISP_WDMA0 12 +#define CLK_VDO0_DISP_WDMA1 13 +#define CLK_VDO0_DISP_RDMA0 14 +#define CLK_VDO0_DISP_RDMA1 15 +#define CLK_VDO0_DSI0 16 +#define CLK_VDO0_DSI1 17 +#define CLK_VDO0_DSC_WRAP0 18 +#define CLK_VDO0_VPP_MERGE0 19 +#define CLK_VDO0_DP_INTF0 20 +#define CLK_VDO0_DISP_MUTEX0 21 +#define CLK_VDO0_DISP_IL_ROT0 22 +#define CLK_VDO0_APB_BUS 23 +#define CLK_VDO0_FAKE_ENG0 24 +#define CLK_VDO0_FAKE_ENG1 25 +#define CLK_VDO0_DL_ASYNC0 26 +#define CLK_VDO0_DL_ASYNC1 27 +#define CLK_VDO0_DL_ASYNC2 28 +#define CLK_VDO0_DL_ASYNC3 29 +#define CLK_VDO0_DL_ASYNC4 30 +#define CLK_VDO0_DISP_MONITOR0 31 +#define CLK_VDO0_DISP_MONITOR1 32 +#define CLK_VDO0_DISP_MONITOR2 33 +#define CLK_VDO0_DISP_MONITOR3 34 +#define CLK_VDO0_DISP_MONITOR4 35 +#define CLK_VDO0_SMI_GALS 36 +#define CLK_VDO0_SMI_COMMON 37 +#define CLK_VDO0_SMI_EMI 38 +#define CLK_VDO0_SMI_IOMMU 39 +#define CLK_VDO0_SMI_LARB 40 +#define CLK_VDO0_SMI_RSI 41 +#define CLK_VDO0_DSI0_DSI 42 +#define CLK_VDO0_DSI1_DSI 43 +#define 
CLK_VDO0_DP_INTF0_DP_INTF 44 +#define CLK_VDO0_NR_CLK 45 + +/* VDOSYS1 */ + +#define CLK_VDO1_SMI_LARB2 0 +#define CLK_VDO1_SMI_LARB3 1 +#define CLK_VDO1_GALS 2 +#define CLK_VDO1_FAKE_ENG0 3 +#define CLK_VDO1_FAKE_ENG 4 +#define CLK_VDO1_MDP_RDMA0 5 +#define CLK_VDO1_MDP_RDMA1 6 +#define CLK_VDO1_MDP_RDMA2 7 +#define CLK_VDO1_MDP_RDMA3 8 +#define CLK_VDO1_VPP_MERGE0 9 +#define CLK_VDO1_VPP_MERGE1 10 +#define CLK_VDO1_VPP_MERGE2 11 +#define CLK_VDO1_VPP_MERGE3 12 +#define CLK_VDO1_VPP_MERGE4 13 +#define CLK_VDO1_VPP2_TO_VDO1_DL_ASYNC 14 +#define CLK_VDO1_VPP3_TO_VDO1_DL_ASYNC 15 +#define CLK_VDO1_DISP_MUTEX 16 +#define CLK_VDO1_MDP_RDMA4 17 +#define CLK_VDO1_MDP_RDMA5 18 +#define CLK_VDO1_MDP_RDMA6 19 +#define CLK_VDO1_MDP_RDMA7 20 +#define CLK_VDO1_DP_INTF0_MM 21 +#define CLK_VDO1_DPI0_MM 22 +#define CLK_VDO1_DPI1_MM 23 +#define CLK_VDO1_DISP_MONITOR 24 +#define CLK_VDO1_MERGE0_DL_ASYNC 25 +#define CLK_VDO1_MERGE1_DL_ASYNC 26 +#define CLK_VDO1_MERGE2_DL_ASYNC 27 +#define CLK_VDO1_MERGE3_DL_ASYNC 28 +#define CLK_VDO1_MERGE4_DL_ASYNC 29 +#define CLK_VDO1_VDO0_DSC_TO_VDO1_DL_ASYNC 30 +#define CLK_VDO1_VDO0_MERGE_TO_VDO1_DL_ASYNC 31 +#define CLK_VDO1_HDR_VDO_FE0 32 +#define CLK_VDO1_HDR_GFX_FE0 33 +#define CLK_VDO1_HDR_VDO_BE 34 +#define CLK_VDO1_HDR_VDO_FE1 35 +#define CLK_VDO1_HDR_GFX_FE1 36 +#define CLK_VDO1_DISP_MIXER 37 +#define CLK_VDO1_HDR_VDO_FE0_DL_ASYNC 38 +#define CLK_VDO1_HDR_VDO_FE1_DL_ASYNC 39 +#define CLK_VDO1_HDR_GFX_FE0_DL_ASYNC 40 +#define CLK_VDO1_HDR_GFX_FE1_DL_ASYNC 41 +#define CLK_VDO1_HDR_VDO_BE_DL_ASYNC 42 +#define CLK_VDO1_DPI0 43 +#define CLK_VDO1_DISP_MONITOR_DPI0 44 +#define CLK_VDO1_DPI1 45 +#define CLK_VDO1_DISP_MONITOR_DPI1 46 +#define CLK_VDO1_DPINTF 47 +#define CLK_VDO1_DISP_MONITOR_DPINTF 48 +#define CLK_VDO1_26M_SLOW 49 +#define CLK_VDO1_NR_CLK 50 + +#endif /* _DT_BINDINGS_CLK_MT8195_H */ diff --git a/contrib/genio/data/spi/test.bin b/contrib/genio/data/spi/test.bin new file mode 100644 index 0000000000..457332e8db --- /dev/null +++ 
b/contrib/genio/data/spi/test.bin @@ -0,0 +1 @@ +Canonical spidev_test test diff --git a/contrib/genio/launcher/genio-odm-certification-G1200 b/contrib/genio/launcher/genio-odm-certification-G1200 new file mode 100644 index 0000000000..b2a59262bc --- /dev/null +++ b/contrib/genio/launcher/genio-odm-certification-G1200 @@ -0,0 +1,32 @@ +#!/usr/bin/env checkbox-cli-wrapper +[launcher] +app_id = com.canonical.certification:checkbox +launcher_version = 1 +stock_reports = text, submission_files, certification + +[test plan] +unit = com.canonical.certification::client-cert-odm-genio-desktop-22-04-manual +filter = com.canonical.certification::client-cert-odm-genio-desktop-22-04-manual + com.canonical.certification::client-cert-odm-genio-desktop-22-04-automated + com.canonical.certification::client-cert-odm-genio-server-22-04-manual + com.canonical.certification::client-cert-odm-genio-server-22-04-automated + +[manifest] +has_ethernet_adapter = true +has_i2c = true +has_card_reader = true +has_audio_capture = true +has_audio_playback = true +has_hardware_watchdog = true +has_usbc_video = true +has_usbc_data = true +has_usbc_otg = true +has_genio_amic = true +has_genio_dmic = true +has_genio_pcm = true + + +[environment] +GENIO_DEVICE=G1200-evk +GENIO_GPU_DRIVER_VERSION=r43p0 +GPIO_LOOPBACK_PIN_MAPPING=0:18:0:0:26:100 diff --git a/contrib/genio/launcher/genio-odm-certification-G350 b/contrib/genio/launcher/genio-odm-certification-G350 new file mode 100644 index 0000000000..26784c12d7 --- /dev/null +++ b/contrib/genio/launcher/genio-odm-certification-G350 @@ -0,0 +1,35 @@ +#!/usr/bin/env checkbox-cli-wrapper +[launcher] +app_id = com.canonical.certification:checkbox +launcher_version = 1 +stock_reports = text, submission_files, certification + +[test plan] +unit = com.canonical.certification::client-cert-odm-genio-desktop-22-04-manual +filter = com.canonical.certification::client-cert-odm-genio-desktop-22-04-manual + 
com.canonical.certification::client-cert-odm-genio-desktop-22-04-automated + com.canonical.certification::client-cert-odm-genio-server-22-04-manual + com.canonical.certification::client-cert-odm-genio-server-22-04-automated + +[test selection] +exclude = com.canonical.certification::ethernet/wol_S3_.* + +[manifest] +has_ethernet_adapter = true +has_i2c = true +has_card_reader = true +has_audio_capture = true +has_audio_playback = true +has_hardware_watchdog = true +has_usbc_video = false +has_usbc_data = false +has_usbc_otg = false +has_genio_amic = true +has_genio_dmic = true +has_genio_pcm = false + + +[environment] +GENIO_DEVICE=G350 +GENIO_GPU_DRIVER_VERSION=r43p0 +GPIO_LOOPBACK_PIN_MAPPING=0:15:109:0:7:124 diff --git a/contrib/genio/launcher/genio-odm-certification-G700 b/contrib/genio/launcher/genio-odm-certification-G700 new file mode 100644 index 0000000000..f39af64199 --- /dev/null +++ b/contrib/genio/launcher/genio-odm-certification-G700 @@ -0,0 +1,31 @@ +#!/usr/bin/env checkbox-cli-wrapper +[launcher] +app_id = com.canonical.certification:checkbox +launcher_version = 1 +stock_reports = text, submission_files, certification + +[test plan] +unit = com.canonical.certification::client-cert-odm-genio-desktop-22-04-manual +filter = com.canonical.certification::client-cert-odm-genio-desktop-22-04-manual + com.canonical.certification::client-cert-odm-genio-desktop-22-04-automated + com.canonical.certification::client-cert-odm-genio-server-22-04-manual + com.canonical.certification::client-cert-odm-genio-server-22-04-automated + +[manifest] +has_ethernet_adapter = true +has_i2c = true +has_card_reader = true +has_audio_capture = true +has_audio_playback = true +has_hardware_watchdog = true +has_usbc_video = true +has_usbc_data = true +has_usbc_otg = true +has_genio_amic = true +has_genio_dmic = true +has_genio_pcm = true + +[environment] +GENIO_DEVICE=G700 +GENIO_GPU_DRIVER_VERSION=r43p0 +GPIO_LOOPBACK_PIN_MAPPING=0:13:37:0:15:41 diff --git 
a/contrib/genio/manage.py b/contrib/genio/manage.py new file mode 100755 index 0000000000..e6c3d40a03 --- /dev/null +++ b/contrib/genio/manage.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 +from plainbox.provider_manager import setup, N_ + +# You can inject other stuff here but please don't go overboard. +# +# In particular, if you need comprehensive compilation support to get +# your bin/ populated then please try to discuss that with us in the +# upstream project IRC channel #checkbox on irc.freenode.net. + +# NOTE: one thing that you could do here, that makes a lot of sense, +# is to compute version somehow. This may vary depending on the +# context of your provider. Future version of PlainBox will offer git, +# bzr and mercurial integration using the versiontools library +# (optional) + +setup( + name="checkbox-provider-genio", + namespace="com.canonical.contrib", + version="1.0", + description=N_("The Genio provider"), + gettext_domain="checkbox-provider-genio", +) diff --git a/contrib/genio/tox.ini b/contrib/genio/tox.ini new file mode 100644 index 0000000000..cfe5704b8c --- /dev/null +++ b/contrib/genio/tox.ini @@ -0,0 +1,85 @@ +[tox] +envlist = py35,py36,py38,py310 +skip_missing_interpreters = true +skipsdist=True + +[testenv] +allowlist_externals = rm +commands = + {envpython} -m pip -q install ../../checkbox-ng + # Required because this provider depends on checkbox-support parsers & scripts + {envpython} -m pip -q install ../../checkbox-support + rm -f /var/tmp/checkbox-providers-develop/checkbox-provider-resource.provider + rm -f /var/tmp/checkbox-providers-develop/checkbox-provider-base.provider + # Required because this provider depends on the resource and base providers + {envpython} ../../providers/resource/manage.py develop + {envpython} ../../providers/base/manage.py develop + {envpython} manage.py develop + {envpython} manage.py validate + {envpython} -m coverage run manage.py test + {envpython} -m coverage report + {envpython} -m coverage xml + 
+[testenv:py35] +deps = + flake8 + coverage == 5.5 + natsort == 4.0.3 + requests == 2.9.1 + urwid == 1.3.1 + Jinja2 == 2.8 + MarkupSafe == 0.23 + XlsxWriter == 0.7.3 + tqdm == 4.19.5 + pyparsing == 2.0.3 + distro == 1.0.1 + PyYAML == 3.11 +setenv= +# we do not care about the package version in tox +# but it breaks some old python3.5 builds + SETUPTOOLS_SCM_PRETEND_VERSION=0.0 + +[testenv:py36] +deps = + flake8 + coverage == 5.5 + natsort == 4.0.3 + requests == 2.18.4 + urwid == 2.0.1 + Jinja2 == 2.10 + MarkupSafe == 1.1.0 + XlsxWriter == 0.9.6 + tqdm == 4.19.5 + pyparsing == 2.2.0 + distro == 1.0.1 + PyYAML == 3.12 + +[testenv:py38] +deps = + flake8 + coverage == 7.3.0 + natsort == 7.0.1 + requests == 2.22.0 + urwid == 2.0.1 + Jinja2 == 2.10.1 + MarkupSafe == 1.1.0 + XlsxWriter == 1.1.2 + tqdm == 4.30.0 + pyparsing == 2.4.6 + distro == 1.4.0 + PyYAML == 5.3.1 + +[testenv:py310] +deps = + flake8 + coverage == 7.3.0 + natsort == 8.0.2 + requests == 2.25.1 + urwid == 2.1.2 + Jinja2 == 3.0.3 + MarkupSafe == 2.0.1 + XlsxWriter == 3.0.2 + tqdm == 4.57.0 + pyparsing == 2.4.7 + distro == 1.7.0 + PyYAML == 6.0.1 diff --git a/contrib/genio/units/audio/category.pxu b/contrib/genio/units/audio/category.pxu new file mode 100644 index 0000000000..bad452485a --- /dev/null +++ b/contrib/genio/units/audio/category.pxu @@ -0,0 +1,3 @@ +unit: category +id: genio-audio +_name: Genio Audio tests diff --git a/contrib/genio/units/audio/jobs.pxu b/contrib/genio/units/audio/jobs.pxu new file mode 100644 index 0000000000..0d651d42c3 --- /dev/null +++ b/contrib/genio/units/audio/jobs.pxu @@ -0,0 +1,246 @@ +id: genio-audio/earphone-playback +category_id: genio-audio +plugin: manual +flags: also-after-suspend +estimated_duration: 30.0 +_description: + Check that the various audio channels are working through Earphone / Headset 3.5mm jack port +_steps: + 1. Plug the earphone / headset to earphone 3.5mm jack port + 2. 
Set the sink as "Built-in Audio Earphone speaker + Lineout speaker" + $ pacmd set-default-sink 0 + 3. Set the sink port as Headphones + $ pacmd set-sink-port 0 "[Out] Headphones" + 4. Start channel testing + $ speaker-test -c 2 -r 48000 -f S16_LE -l 1 -t wav +_verification: + You should clearly hear a voice from the different audio channels + +id: genio-audio/speaker-playback +category_id: genio-audio +plugin: manual +flags: also-after-suspend +estimated_duration: 30.0 +_description: + Check that the various audio channels are working through Speaker 3.5mm jack port +_steps: + 1. Plug a headset / speaker to speaker 3.5mm jack port + 2. Set the sink as "Built-in Audio Earphone speaker + Lineout speaker" + $ pacmd set-default-sink 0 + 3. Set the sink port as Speaker + $ pacmd set-sink-port 0 "[Out] Speaker" + 4. Start channel testing + $ speaker-test -c 2 -r 48000 -f S16_LE -l 1 -t wav +_verification: + You should clearly hear a voice from the left audio channels. + +id: genio-audio/hdmi-playback +category_id: genio-audio +plugin: manual +flags: also-after-suspend +estimated_duration: 30.0 +_description: + Check that the various audio channels are working through external HDMI monitor +_steps: + 1. Connect an external monitor to HDMI TX interface (through HDMI cable) + 2. Set the sink as "Built-in Audio HDMI/DP output" + $ pacmd set-default-sink 1 + 3. Set the sink port as HDMI (G350 skips this step) + $ pacmd set-sink-port 1 "[Out] HDMI" + 4. Start channel testing + $ speaker-test -c 2 -r 48000 -f S16_LE -l 1 -t wav +_verification: + You should clearly hear a voice from the different audio channels. + +id: genio-audio/type-c-to-dp-playback +category_id: genio-audio +plugin: manual +flags: also-after-suspend +estimated_duration: 30.0 +_description: + Check that the various audio channels are working through external monitor with type-c cable +_steps: + 1. Connect an external monitor to type-c interface (through type-c to DP/HDMI cable or type-c cable) + 2. 
Set the sink as "Built-in Audio HDMI/DP output" + $ pacmd set-default-sink 1 + 3. Set the sink port as DP + $ pacmd set-sink-port 1 "[Out] DP" + 4. Start channel testing + $ speaker-test -c 2 -r 48000 -f S16_LE -l 1 -t wav +_verification: + You should clearly hear a voice from the different audio channels. + +id: genio-audio/usb-headset-playback +category_id: genio-audio +plugin: manual +flags: also-after-suspend +estimated_duration: 30.0 +_description: + Check that the various audio channels are working through USB headset +_steps: + 1. Plug the USB headset to device + 2. Find the sound card via command `$ aplay -l`. + For example, the new entry of physical sound card should look like below + card 1: MS [Jabra EVOLVE LINK MS], device 0: USB Audio [USB Audio] + Subdevices: 1/1 + Subdevice #0: subdevice #0 + 3. Start channel testing + $ speaker-test -D plughw:1,0 -c 2 -r 48000 -f S16_LE -l 1 -t wav +_verification: + You should clearly hear a voice from the different audio channels. + +id: genio-audio/pcm-playback +category_id: genio-audio +plugin: manual +flags: also-after-suspend +estimated_duration: 30.0 +imports: from com.canonical.plainbox import manifest +requires: + manifest.has_genio_pcm == 'True' +_description: + This test will check that loopback of PCM works correctly +_steps: + 1. GPIO jumper connection + # For G1200-evk + 1. Remove the jumper on J43 + 2. Use a jumper to short the IO67 and IO65 + # For G700 + Use a jumper to short the PCMD0 and PCMD1 + 2. Configure the mixer for alsa soundcard driver + $ amixer -c mt8395evk cset name='HP Mux' 'Audio Playback' + $ amixer -c mt8395evk cset name='LOL Mux' 0 + 3. Set the sink and source + # For G1200-evk + $ pacmd set-default-sink 2 + $ pacmd set-default-source 5 + # For G700 + $ pacmd set-default-sink 2 + $ pacmd set-default-source 7 + 4. Plug a headset to earphone 3.5mm jack port + 5. Playback in the background + $ speaker-test -D pulse -c 2 -r 48000 -f S16_LE -t wav -d 10 & + 6. 
Record and Play + $ arecord -D pulse -c 2 -r 48000 -f S16_LE | aplay -D hw:0,0 -c 2 -r 48000 -f S16_LE +_verification: + Did you hear the sound of loop audio channel from headset? + +id: genio-audio/headset-record-headset-playback +category_id: genio-audio +plugin: manual +flags: also-after-suspend +estimated_duration: 30.0 +_description: + This test will check that recording sound using the 3.5mm earphone / headset jack works correctly +_steps: + 1. Plug a headset to earphone 3.5mm jack port + 2. Set the sink and source + # For G1200-evk + $ pacmd set-default-sink 0 + $ pacmd set-sink-port 0 "[Out] Headphones" + $ pacmd set-default-source 3 + $ pacmd set-source-port 3 "[In] Headset" + # For G700 + $ pacmd set-default-sink 0 + $ pacmd set-sink-port 0 "[Out] Headphones" + $ pacmd set-default-source 4 + $ pacmd set-source-port 4 "[In] Headset" + # For G350 + $ pacmd set-default-sink 0 # jack + $ pacmd set-sink-port 0 "[Out] Headphones" # Headset + $ pacmd set-default-source 3 # PMIC + $ pacmd set-source-port 3 "[In] Mic2" # Headset Mic + 3. Record for 10 seconds to a wav file + $ arecord -D pulse -r 48000 -f S16_LE -d 10 headset_record.wav + 4. Play the record + $ aplay -D pulse -c 2 -r 48000 -f S16_LE headset_record.wav +_verification: + Did you hear your speech played back? + +id: genio-audio/amic-record-headset-playback +category_id: genio-audio +plugin: manual +flags: also-after-suspend +estimated_duration: 30.0 +imports: from com.canonical.plainbox import manifest +requires: + manifest.has_genio_amic == 'True' +_description: + This test will check that recording sound using the onboard AMIC works correctly +_steps: + 1. Plug a headset to earphone 3.5mm jack port + 2. 
Set the sink and source + # For G1200-evk + $ pacmd set-default-sink 0 + $ pacmd set-sink-port 0 "[Out] Headphones" + $ pacmd set-default-source 3 + $ pacmd set-source-port 3 "[In] Mic1" + # For G700 + $ pacmd set-default-sink 0 + $ pacmd set-sink-port 0 "[Out] Headphones" + $ pacmd set-default-source 4 + $ pacmd set-source-port 4 "[In] Mic1" + # For G350 + $ pacmd set-default-sink 0 # jack + $ pacmd set-sink-port 0 "[Out] Headphones" # Headset + $ pacmd set-default-source 3 # PMIC + $ pacmd set-source-port 3 "[In] Mic1" # AMIC + 2. Record for 10 seconds to a wav file + $ arecord -D pulse -c 2 -r 48000 -f S16_LE -d 10 amic_record.wav + 3. Play the record + $ aplay -D pulse -c 2 -r 48000 -f S16_LE amic_record.wav +_verification: + Did you hear your speech played back? + +id: genio-audio/dmic-record-headset-playback +category_id: genio-audio +plugin: manual +flags: also-after-suspend +estimated_duration: 30.0 +imports: from com.canonical.plainbox import manifest +requires: + manifest.has_genio_dmic == 'True' +_description: + This test will check that recording sound using the onboard DMIC works correctly +_steps: + 1. Plug a headset to earphone 3.5mm jack port + 2. Set the sink and source + # For G1200-evk + $ pacmd set-default-sink 0 + $ pacmd set-sink-port 0 "[Out] Headphones" + $ pacmd set-default-source 4 + # For G700 + $ pacmd set-default-sink 0 + $ pacmd set-sink-port 0 "[Out] Headphones" + $ pacmd set-default-source 5 + # For G350 + $ pacmd set-default-sink 0 # jack + $ pacmd set-sink-port 0 "[Out] Headphones" # Headset + $ pacmd set-default-source 2 # DMIC + $ pacmd set-source-port 3 "[In] Mic2" # Dmic doesn't work if [In] Mic1 is active + 2. Record for 10 seconds to a wav file + $ arecord -D pulse -c 2 -r 48000 -f S16_LE -d 10 dmic_record.wav + 3. Play the record + $ aplay -D pulse -c 2 -r 48000 -f S16_LE dmic_record.wav +_verification: + Did you hear your speech played back? 
+ +id: genio-audio/usb-headset-record-playback +category_id: genio-audio +plugin: manual +flags: also-after-suspend +estimated_duration: 30.0 +_description: + This test will check that recording sound using the USB headset works correctly +_steps: + 1. Plug the USB headset to device + 2. Find the sound card via command `$ aplay -l`. + For example, the new entry of physical sound card should look like below + card 1: MS [Jabra EVOLVE LINK MS], device 0: USB Audio [USB Audio] + Subdevices: 1/1 + Subdevice #0: subdevice #0 + 3. Record for 10 seconds to a wav file with specific card number + $ arecord -D plughw:1,0 -c 1 -r 48000 -f S32_LE -d 10 usb_test.wav + 4. Play the record with specific card number + $ aplay -D plughw:1,0 -c 1 -r 48000 -f S32_LE usb_test.wav +_verification: + Did you hear your speech played back? diff --git a/contrib/genio/units/audio/manifest.pxu b/contrib/genio/units/audio/manifest.pxu new file mode 100644 index 0000000000..8fe5442235 --- /dev/null +++ b/contrib/genio/units/audio/manifest.pxu @@ -0,0 +1,17 @@ +unit: manifest entry +id: has_genio_amic +_name: AMIC (Analog Microphone) +_prompt: Does this machine have the following audio support? +value-type: bool + +unit: manifest entry +id: has_genio_dmic +_name: DMIC (Digital Microphone) +_prompt: Does this machine have the following audio support? +value-type: bool + +unit: manifest entry +id: has_genio_pcm +_name: PCM (Pulse Code Modulation) +_prompt: Does this machine have the following audio support? 
+value-type: bool diff --git a/contrib/genio/units/audio/test-plan.pxu b/contrib/genio/units/audio/test-plan.pxu new file mode 100644 index 0000000000..0a20aaae7d --- /dev/null +++ b/contrib/genio/units/audio/test-plan.pxu @@ -0,0 +1,54 @@ +id: genio-audio-full +unit: test plan +_name: Genio Audio test +_description: Full audio tests for G1200-evk, G700 and G350 platforms +include: +nested_part: + genio-audio-manual + genio-audio-automated + after-suspend-genio-audio-manual + after-suspend-genio-audio-automated + +id: genio-audio-manual +unit: test plan +_name: Genio Manual Audio test +_description: Manual audio test for G1200-evk, G700 and G350 platforms +include: + genio-audio/earphone-playback + genio-audio/speaker-playback + genio-audio/hdmi-playback + genio-audio/type-c-to-dp-playback + genio-audio/pcm-playback + genio-audio/usb-headset-playback + genio-audio/headset-record-headset-playback + genio-audio/amic-record-headset-playback + genio-audio/dmic-record-headset-playback + genio-audio/usb-headset-record-playback + +id: genio-audio-automated +unit: test plan +_name: Genio Auto Audio test +_description: Automated audio test for G1200-evk, G700 and G350 platforms +include: + +id: after-suspend-genio-audio-manual +unit: test plan +_name: Genio Manual audio test +_description: Manual after suspend audio test for G1200-evk, G700 and G350 platforms +include: + after-suspend-genio-audio/earphone-playback + after-suspend-genio-audio/speaker-playback + after-suspend-genio-audio/hdmi-playback + after-suspend-genio-audio/type-c-to-dp-playback + after-suspend-genio-audio/usb-headset-playback + after-suspend-genio-audio/pcm-playback + after-suspend-genio-audio/headset-record-headset-playback + after-suspend-genio-audio/amic-record-headset-playback + after-suspend-genio-audio/dmic-record-headset-playback + after-suspend-genio-audio/usb-headset-record-playback + +id: after-suspend-genio-audio-automated +unit: test plan +_name: Genio Auto Audio test +_description: 
Automated after suspend audio test for G1200-evk, G700 and G350 platforms +include: diff --git a/contrib/genio/units/boot/category.pxu b/contrib/genio/units/boot/category.pxu new file mode 100644 index 0000000000..99c3473cc1 --- /dev/null +++ b/contrib/genio/units/boot/category.pxu @@ -0,0 +1,3 @@ +unit: category +id: genio-boot +_name: Genio boot tests diff --git a/contrib/genio/units/boot/jobs.pxu b/contrib/genio/units/boot/jobs.pxu new file mode 100644 index 0000000000..0f9e197d7c --- /dev/null +++ b/contrib/genio/units/boot/jobs.pxu @@ -0,0 +1,30 @@ +id: genio_boot_storage_device_name +plugin: resource +_summary: Collect and report the type of boot storage device +estimated_duration: 0.1 +command: + boot_partition.py --check_device_name + +id: genio-boot/device_name_check +plugin: shell +category_id: genio-boot +estimated_duration: 1 +user: root +flags: also-after-suspend +_summary: Verify if the model name is correct +command: + boot_partition.py --check_device_name --exit_when_check_fail + +unit: template +template-resource: genio_boot_storage_device_name +template-unit: job +id: genio-boot/verify_boot_partition_{device} +depends: genio-boot/device_name_check +plugin: shell +category_id: genio-boot +estimated_duration: 1 +user: root +flags: also-after-suspend +_summary: Verify the boot partition +command: + boot_partition.py --path {path} diff --git a/contrib/genio/units/boot/test-plan.pxu b/contrib/genio/units/boot/test-plan.pxu new file mode 100644 index 0000000000..5b80ebfe87 --- /dev/null +++ b/contrib/genio/units/boot/test-plan.pxu @@ -0,0 +1,19 @@ +id: genio-boot-automated +unit: test plan +_name: Genio Boot auto tests +_description: Automated Genio Graphic tests for devices +include: + genio-boot/device_name_check + genio-boot/verify_boot_partition_.* +bootstrap_include: + genio_boot_storage_device_name + +id: after-suspend-genio-boot-automated +unit: test plan +_name: Genio Boot auto tests (after suspend) +_description: Automated Genio Graphic tests 
for devices +include: + after-suspend-genio-boot/device_name_check + after-suspend-genio-boot/verify_boot_partition_.* +bootstrap_include: + genio_boot_storage_device_name \ No newline at end of file diff --git a/contrib/genio/units/device/jobs.pxu b/contrib/genio/units/device/jobs.pxu new file mode 100644 index 0000000000..d775927ba6 --- /dev/null +++ b/contrib/genio/units/device/jobs.pxu @@ -0,0 +1,14 @@ +id: genio_device_soc +plugin: resource +_summary: Collect and report the type of SoC +estimated_duration: 0.1 +command: + output=$(tr -d '\0' < /proc/device-tree/compatible) + # Set comma as delimiter + IFS=',' + read -a output_arr <<< "$output" + # Set dash as delimiter + IFS='-' + read -a s <<< "${output_arr[1]}" + echo "SoC: ${s[0]}" + echo diff --git a/contrib/genio/units/display/category.pxu b/contrib/genio/units/display/category.pxu new file mode 100644 index 0000000000..cd5a637bf7 --- /dev/null +++ b/contrib/genio/units/display/category.pxu @@ -0,0 +1,3 @@ +unit: category +id: genio-display +_name: Genio Display tests diff --git a/contrib/genio/units/display/jobs.pxu b/contrib/genio/units/display/jobs.pxu new file mode 100644 index 0000000000..04f62ce7a0 --- /dev/null +++ b/contrib/genio/units/display/jobs.pxu @@ -0,0 +1,70 @@ +id: genio-display/boot-logo +plugin: manual +category_id: genio-display +imports: from com.canonical.plainbox import manifest +requires: manifest.has_genio_dsi == "True" +estimated_duration: 60 +_purpose: + This test verifies that boot logo can be shown on built-in DSI display. + According to the spec, boot logo should only be shown on DSI display even there's other display connected. +_steps: + 1. If your DSI display is not already connected, power device off and connect it then power on +_verification: + For G1200-evk and G700 + 1. Was the boot logo shown on DIS panel correctly? + For G350 + 1. No boot logo be shown on DSI panel. 
(Not supported) + +id: genio-display/brightness-dsi +plugin:user-interact-verify +category_id: genio-display +flags: also-after-suspend +estimated_duration: 3.0 +user: root +environ: GENIO_DEVICE +imports: from com.canonical.plainbox import manifest +requires: manifest.has_genio_dsi == "True" +_summary: + Test the brightness of DSI display +_purpose: + This test tests if the brightness of DSI display can be changed to different values +command: + brightness_test.py -p "$GENIO_DEVICE" -d dsi +_verification: + 1. Was the screen brightness changed to different levels? + +id: genio-display/brightness-edp +plugin:user-interact-verify +category_id: genio-display +flags: also-after-suspend +estimated_duration: 3.0 +user: root +environ: GENIO_DEVICE +imports: from com.canonical.plainbox import manifest +requires: manifest.has_genio_edp == "True" and genio_device_soc.SoC in ['mt8390','mt8395'] +_summary: + Test the brightness of eDP display +_purpose: + This test tests if the brightness of eDP display can be changed to different values +command: + brightness_test.py -p "$GENIO_DEVICE" -d edp +_verification: + 1. Was the screen brightness changed to different levels? + +id: genio-display/brightness-lvds +plugin:user-interact-verify +category_id: genio-display +flags: also-after-suspend +estimated_duration: 3.0 +user: root +environ: GENIO_DEVICE +imports: from com.canonical.plainbox import manifest +requires: manifest.has_genio_lvds == "True" +_summary: + Test the brightness of LVDS display +_purpose: + This test tests if the brightness of LVDS display can be changed to different values +command: + brightness_test.py -p "$GENIO_DEVICE" -d lvds +_verification: + 1. Was the screen brightness changed to different levels? 
diff --git a/contrib/genio/units/display/manifest.pxu b/contrib/genio/units/display/manifest.pxu new file mode 100644 index 0000000000..9d17664a2f --- /dev/null +++ b/contrib/genio/units/display/manifest.pxu @@ -0,0 +1,17 @@ +unit: manifest entry +id: has_genio_dsi +_name: DSI Display Panel +_prompt: Does this machine have the following built-in display? +value-type: bool + +unit: manifest entry +id: has_genio_edp +_name: eDP Display Panel +_prompt: Does this machine have the following built-in display? +value-type: bool + +unit: manifest entry +id: has_genio_lvds +_name: LVDS Display Panel +_prompt: Does this machine have the following built-in display? +value-type: bool diff --git a/contrib/genio/units/display/test-plan.pxu b/contrib/genio/units/display/test-plan.pxu new file mode 100644 index 0000000000..e73608b4ff --- /dev/null +++ b/contrib/genio/units/display/test-plan.pxu @@ -0,0 +1,58 @@ +id: genio-display-full +unit: test plan +_name: Genio Display tests +_description: Full Genio Display tests for devices +include: +nested_part: + genio-display-manual + genio-display-automated + +id: genio-display-manual +unit: test plan +_name: Genio Display manual tests +_description: Manual Genio Display tests for devices +include: + genio-display/boot-logo + genio-display/multi-built-in-displays-dsi-and-edp + genio-display/multi-built-in-displays-dsi-and-lvds + genio-display/hdmi-hotplug + genio-display/type-c-to-displayport-hotplug + genio-display/type-c-to-displayport-and-hdmi-hotplug + genio-display/brightness-dsi + genio-display/brightness-edp + genio-display/brightness-lvds + +id: genio-display-automated +unit: test plan +_name: Genio Display auto tests +_description: Automated Genio Display tests for devices +include: + +id: after-suspend-genio-display-full +unit: test plan +_name: Genio Display tests (after suspend) +_description: Full after suspend Genio Display tests for devices +include: +nested_part: + after-suspend-genio-display-manual + 
after-suspend-genio-display-automated + +id: after-suspend-genio-display-manual +unit: test plan +_name: Genio Display manual tests (after suspend) +_description: Manual after suspend Genio Display tests for devices +include: + after-suspend-genio-display/multi-built-in-displays-dsi-and-edp + after-suspend-genio-display/multi-built-in-displays-dsi-and-lvds + after-suspend-genio-display/hdmi-hotplug + after-suspend-genio-display/type-c-to-displayport-hotplug + after-suspend-genio-display/type-c-to-displayport-and-hdmi-hotplug + after-suspend-genio-display/brightness-dsi + after-suspend-genio-display/brightness-edp + after-suspend-genio-display/brightness-lvds + +id: after-suspend-genio-display-automated +unit: test plan +_name: Genio Display auto tests (after suspend) +_description: Automated after suspend Genio Display tests for devices +include: diff --git a/contrib/genio/units/ebbr/category.pxu b/contrib/genio/units/ebbr/category.pxu new file mode 100644 index 0000000000..e55ced6e0e --- /dev/null +++ b/contrib/genio/units/ebbr/category.pxu @@ -0,0 +1,3 @@ +unit: category +id: genio-ebbr +_name: Genio Embedded Base Boot Requirements (EBBR) tests diff --git a/contrib/genio/units/ebbr/jobs.pxu b/contrib/genio/units/ebbr/jobs.pxu new file mode 100644 index 0000000000..3ce797364b --- /dev/null +++ b/contrib/genio/units/ebbr/jobs.pxu @@ -0,0 +1,40 @@ +id: fwts_ebbr_resource +_summary: Generates a Fwts EBBR cases for EBBR test +estimated_duration: 0.02 +category_id: genio-ebbr +plugin: resource +environ: GPIO_LOOPBACK_PIN_MAPPING GENIO_DEVICE +command: + ebbr_resource.py + +unit: template +template-resource: fwts_ebbr_resource +template-unit: job +id: genio-ebbr/fwts-ebbr-{case} +_summary: Fwts EBBR {case} test +_description: {description} +plugin: shell +user: root +category_id: genio-ebbr +estimated_duration: 5s +flags: also-after-suspend +environ: PLAINBOX_SESSION_SHARE +command: + fwts -q {case} -r "$PLAINBOX_SESSION_SHARE"/{case}.log + RET=$? 
+ cat "$PLAINBOX_SESSION_SHARE"/{case}.log + exit $RET + +id: genio-ebbr/ebbr-boot +category_id: genio-ebbr +plugin: shell +user: root +estimated_duration: 0.5 +_summary: UEFI-EBBR Boot Flow +_description: Standard UEFI-EBBR boot flow proposed by the ARM SystemReady IR specification +command: + if ! dmesg | grep -P 'efi: EFI .* by Das U-Boot' ; then + echo "FAIL: unable to find 'efi: EFI ... by Das U-Boot' in dmesg" + exit 1 + fi + echo "PASS: System follows the UEFI-EBBR boot flow." diff --git a/contrib/genio/units/ebbr/test-plan.pxu b/contrib/genio/units/ebbr/test-plan.pxu new file mode 100644 index 0000000000..65d8088a38 --- /dev/null +++ b/contrib/genio/units/ebbr/test-plan.pxu @@ -0,0 +1,43 @@ +id: genio-ebbr-full +unit: test plan +_name: Genio EBBR tests +_description: Full Genio EBBR tests for devices +include: +nested_part: + genio-ebbr-manual + genio-ebbr-automated + +id: genio-ebbr-manual +unit: test plan +_name: Genio EBBR manual tests +_description: Manual Genio EBBR tests for devices +include: + +id: genio-ebbr-automated +unit: test plan +_name: Genio EBBR auto tests +_description: Automated Genio EBBR tests for devices +bootstrap_include: + fwts_ebbr_resource +include: + genio-ebbr/ebbr-boot + genio-ebbr/fwts-ebbr-.* + +id: after-suspend-genio-ebbr-full +unit: test plan +_name: Genio EBBR tests (after suspend) +_description: Full after suspend Genio EBBR tests for devices +include: +nested_part: + +id: after-suspend-genio-ebbr-manual +unit: test plan +_name: Genio EBBR manual tests (after suspend) +_description: Manual after suspend Genio EBBR tests for devices +include: + +id: after-suspend-genio-ebbr-automated +unit: test plan +_name: Genio EBBR auto tests (after suspend) +_description: Automated after suspend Genio EBBR tests for devices +include: diff --git a/contrib/genio/units/graphic/category.pxu b/contrib/genio/units/graphic/category.pxu new file mode 100644 index 0000000000..0ca983f6ff --- /dev/null +++ 
b/contrib/genio/units/graphic/category.pxu @@ -0,0 +1,3 @@ +unit: category +id: genio-graphic +_name: Genio Graphic tests diff --git a/contrib/genio/units/graphic/jobs.pxu b/contrib/genio/units/graphic/jobs.pxu new file mode 100644 index 0000000000..0a035bf415 --- /dev/null +++ b/contrib/genio/units/graphic/jobs.pxu @@ -0,0 +1,124 @@ +id: genio-graphic/eglinfo +category_id: genio-graphic +plugin: shell +imports: from com.canonical.certification import package +requires: + package.name == "libmali-mtk-8188" or package.name == "libmali-mtk-8195" or package.name == "libmali-mtk-8365" +flags: also-after-suspend +environ: GENIO_GPU_DRIVER_VERSION +command: + OUTPUT=$( mktemp ) + EXIT=0 + eglinfo | tee "$OUTPUT" + if grep -qw "eglInitialize failed" "$OUTPUT" ; then + echo "FAIL: found 'eglInitialize failed' error message" + EXIT=1 + fi + if ! grep -qw "EGL client APIs: OpenGL_ES" "$OUTPUT" ; then + echo "FAIL: client API is not OpenGL_ES" + EXIT=1 + fi + if ! grep -qw "$GENIO_GPU_DRIVER_VERSION" "$OUTPUT" ; then + echo "FAIL: version of GPU driver is not '$GENIO_GPU_DRIVER_VERSION'" + EXIT=1 + fi + exit $EXIT +_summary: Check if the EGL API works +_description: + Mali Core should be able to manipulate EGL API. + The way to check it is using "eglinfo" command to get related information. + +id: genio-graphic/rendering-api-vulkan +category_id: genio-graphic +plugin: shell +imports: from com.canonical.certification import package +requires: + package.name == "libmali-mtk-8188" or package.name == "libmali-mtk-8195" or package.name == "libmali-mtk-8365" +flags: also-after-suspend +command: + OUTPUT=$( mktemp ) + EXIT=0 + VK_ICD_FILENAMES=/usr/share/vulkan/icd.d/mali.json vulkaninfo | tee "$OUTPUT" + if ! 
grep -qw "$GENIO_GPU_DRIVER_VERSION" "$OUTPUT" ; then + echo "FAIL: version of GPU driver is not '$GENIO_GPU_DRIVER_VERSION'" + EXIT=1 + fi + exit $EXIT +_summary: Check if the Vulkan Rendering API works +_description: + Mali Core should be able to manipulate Vulkan Rendering API. + The way to check it is using "vulkaninfo" command to get related information. + +id: genio-graphic/computation-api-opencl +category_id: genio-graphic +plugin: shell +imports: from com.canonical.certification import package +requires: + package.name == "libmali-mtk-8188" or package.name == "libmali-mtk-8195" or package.name == "libmali-mtk-8365" +flags: also-after-suspend +command: + OUTPUT=$( mktemp ) + EXIT=0 + clinfo | tee "$OUTPUT" + if ! grep -qw "$GENIO_GPU_DRIVER_VERSION" "$OUTPUT" ; then + echo "FAIL: version of GPU driver is not '$GENIO_GPU_DRIVER_VERSION'" + EXIT=1 + fi + exit $EXIT +_summary: Check if the OpenCL Computation API works +_description: + Mali Core should be able to manipulate OpenCL Computation API. + The way to check it is using "clinfo" command to get related information. + +id: genio-graphic/set-device-to-performance-mode +plugin:shell +category_id: genio-graphic +flags: also-after-suspend +estimated_duration: 3.0 +user: root +environ: GENIO_DEVICE +_summary: + Set device to performance mode before performing glmark2-es2-wayland benchmark +command: + set_as_performance_mode.sh "$GENIO_DEVICE" set-to-performance + +id: genio-graphic/glmark2-es2-wayland +plugin: shell +category_id: genio-graphic +flags: also-after-suspend +depends: genio-graphic/set-device-to-performance-mode +imports: from com.canonical.certification import executable +requires: executable.name == "glmark2-es2-wayland" +estimated_duration: 60 +environ: GENIO_GPU_DRIVER_VERSION +_summary: Run OpenGL ES 2.0 Wayland benchmark on the video card +_purpose: + To test if the score of OpenGL ES 2.0 Wayland benchmark meets the expectation. 
+ The resolution of glmark2-es2-wayland is 1920x1080 +command: + set -e + output=$(glmark2-es2-wayland -s 1920x1080 --data-path $SNAP/usr/share/glmark2) + echo "$output" + if ! echo "$output" | grep "GL_VENDOR" | grep -q "ARM"; then + echo "FAIL: Wrong vendor!" + echo "The expected 'GL_VENDOR' is 'ARM'!" + exit 1 + fi + if ! echo "$output" | grep "GL_VERSION" | grep -q "$GENIO_GPU_DRIVER_VERSION"; then + echo "FAIL: Wrong version!" + echo "The expected 'GL_VERSION' should include '$GENIO_GPU_DRIVER_VERSION'" + exit 1 + fi + +id: genio-graphic/set-device-back-to-origianl-mode +plugin:shell +category_id: genio-graphic +flags: also-after-suspend +depends: genio-graphic/set-device-to-performance-mode +estimated_duration: 3.0 +user: root +environ: GENIO_DEVICE +_summary: + Set device back to original mode +command: + set_as_performance_mode.sh "$GENIO_DEVICE" reset diff --git a/contrib/genio/units/graphic/test-plan.pxu b/contrib/genio/units/graphic/test-plan.pxu new file mode 100644 index 0000000000..435be9d9c6 --- /dev/null +++ b/contrib/genio/units/graphic/test-plan.pxu @@ -0,0 +1,53 @@ +id: genio-graphic-full +unit: test plan +_name: Genio Graphic tests +_description: Full Genio Graphic tests for devices +include: +nested_part: + genio-graphic-manual + genio-graphic-automated + +id: genio-graphic-manual +unit: test plan +_name: Genio Graphic manual tests +_description: Manual Genio Graphic tests for devices +include: + +id: genio-graphic-automated +unit: test plan +_name: Genio Graphic auto tests +_description: Automated Genio Graphic tests for devices +include: + genio-graphic/eglinfo + genio-graphic/rendering-api-vulkan + genio-graphic/computation-api-opencl + genio-graphic/set-device-to-performance-mode + genio-graphic/glmark2-es2-wayland + genio-graphic/set-device-back-to-origianl-mode + +id: after-suspend-genio-graphic-full +unit: test plan +_name: Genio Graphic tests (after suspend) +_description: Full after suspend Genio Graphic tests for devices +include: 
+nested_part: + after-suspend-genio-graphic-manual + after-suspend-genio-graphic-automated + +id: after-suspend-genio-graphic-manual +unit: test plan +_name: Genio Graphic manual tests (after suspend) +_description: Manual after suspend Genio Graphic tests for devices +include: + +id: after-suspend-genio-graphic-automated +unit: test plan +_name: Genio Graphic auto tests (after suspend) +_description: Automated after suspend Genio Graphic tests for devices +include: + after-suspend-genio-graphic/eglinfo + after-suspend-genio-graphic/rendering-api-vulkan + after-suspend-genio-graphic/computation-api-opencl + after-suspend-genio-graphic/set-device-to-performance-mode + after-suspend-genio-graphic/glmark2-es2-wayland + after-suspend-genio-graphic/set-device-back-to-origianl-mode diff --git a/contrib/genio/units/hdmi-rx/category.pxu b/contrib/genio/units/hdmi-rx/category.pxu new file mode 100644 index 0000000000..33dce56047 --- /dev/null +++ b/contrib/genio/units/hdmi-rx/category.pxu @@ -0,0 +1,3 @@ +unit: category +id: genio-hdmi-rx +_name: Genio HDMI RX tests diff --git a/contrib/genio/units/hdmi-rx/jobs.pxu b/contrib/genio/units/hdmi-rx/jobs.pxu new file mode 100644 index 0000000000..200bd66473 --- /dev/null +++ b/contrib/genio/units/hdmi-rx/jobs.pxu @@ -0,0 +1,255 @@ +id: genio-hdmi-rx/module-detect +plugin: shell +category_id: genio-hdmi-rx +estimated_duration: 1 +imports: from com.canonical.plainbox import manifest +requires: manifest.has_hdmi_rx == "True" +user: root +_summary: Check the mtk_hdmirx module has been loaded +command: + echo "Check the mtk_hdmirx module has been loaded..." + lsmod | grep -w mtk_hdmirx + if [[ "$?" -ne 0 ]]; then + echo "Failed: The 'mtk_hdmirx' module isn't loaded!" 
+ exit 1 + fi + echo + echo "Pass" + +id: genio-hdmi-rx/plug-event-detect +plugin: user-interact +category_id: genio-hdmi-rx +estimated_duration: 20 +depends: genio-hdmi-rx/module-detect +imports: + from com.canonical.plainbox import manifest + from com.canonical.certification import executable +requires: + manifest.has_hdmi_rx == "True" + executable.name == "genio-test-tool.hdmi-rx-tool" +flags: also-after-suspend +user: root +command: + EXIT=0 + temp_log=$( mktemp ) + hdmirx_tool_runner.sh $temp_log start_observing + echo + expected_events='HDMI_RX_PWR_5V_CHANGE HDMI_RX_PLUG_IN HDMI_RX_TIMING_LOCK HDMI_RX_AUD_LOCK' + for event in $expected_events + do + echo "Checking event: $event..." + if [ "$(grep -w $event $temp_log)" == "" ]; then + echo "- FAIL: Unable to find the '$event' event" + EXIT=1 + else + echo "- PASS" + fi + done + exit $EXIT +_summary: Check the event by hdmi_rx tool while plugging the HDMI cable to HDMI RX port +_purpose: + This test checks the HDMI_RX_PWR_5V_CHANGE, HDMI_RX_PLUG_IN, HDMI_RX_TIMING_LOCK and HDMI_RX_AUD_LOCK events can be detected + while the HDMI RX port be connected with a Host or Player such as Laptop through HDMI cable. +_steps: + On Host or Player side such as a Laptop with Ubuntu 22.04 Desktop Image: + 1. Check and set the value of Sample Specification to be "s16le 2ch 48000Hz" + - List all sinks via command '$ pactl list sinks' and check the value of specific HDMI output + 2. Plug a HDMI cable to Host or Player + On DUT side: + 1. Press "Enter" to perform event detection + 2. 
You will have 15 seconds to plug the HDMI cable to HDMI RX port + +id: genio-hdmi-rx/check-hdmi-cable-connection +plugin: shell +category_id: genio-hdmi-rx +estimated_duration: 5 +depends: genio-hdmi-rx/module-detect +imports: + from com.canonical.plainbox import manifest + from com.canonical.certification import executable +requires: + manifest.has_hdmi_rx == "True" + executable.name == "genio-test-tool.hdmi-rx-tool" +flags: also-after-suspend +user: root +command: + temp_log=$( mktemp ) + hdmirx_tool_runner.sh $temp_log check_cable + echo + hdmirx_output_checker.sh $temp_log verify_check_cable_output "hdmi connected" +_summary: Check the HDMI connection by using hdmi_rx tool +_description: + This test checks the hdmi_rx tool should shows the "hdmi connected" information after + the HDMI RX port be connected with a Host or Player such as Laptop through HDMI cable. + +id: genio-hdmi-rx/check-video-info +plugin: shell +category_id: genio-hdmi-rx +estimated_duration: 5 +depends: + genio-hdmi-rx/module-detect + genio-hdmi-rx/check-hdmi-cable-connection +imports: + from com.canonical.plainbox import manifest + from com.canonical.certification import executable +requires: + manifest.has_hdmi_rx == "True" + executable.name == "genio-test-tool.hdmi-rx-tool" +flags: also-after-suspend +user: root +command: + temp_log=$( mktemp ) + hdmirx_tool_runner.sh $temp_log check_video_locked + echo + hdmirx_output_checker.sh $temp_log verify_check_video_locked_output "video locked" + temp_log=$( mktemp ) + hdmirx_tool_runner.sh $temp_log get_video_info + echo + expected_values="1920 1080 60" + hdmirx_output_checker.sh $temp_log verify_get_video_info_output "${expected_values}" +_summary: Check the event by hdmi_rx tool while plugging the HDMI cable to HDMI RX port +_description: + This test checks some video information from hdmi_rx tool should match the expectation. 
+ It expectes v.hactive = 1920, v.vactive = 1080 and v.frame_rate = 60 once the HDMI RX port + be connected with a Host or Player such as Laptop through HDMI cable. + +id: genio-hdmi-rx/check-audio-info +plugin: shell +category_id: genio-hdmi-rx +estimated_duration: 5 +depends: + genio-hdmi-rx/module-detect + genio-hdmi-rx/check-hdmi-cable-connection +imports: + from com.canonical.plainbox import manifest + from com.canonical.certification import executable +requires: + manifest.has_hdmi_rx == "True" + executable.name == "genio-test-tool.hdmi-rx-tool" +flags: also-after-suspend +user: root +command: + temp_log=$( mktemp ) + hdmirx_tool_runner.sh $temp_log check_audio_locked + echo + hdmirx_output_checker.sh $temp_log verify_check_audio_locked_output "video locked" + temp_log=$( mktemp ) + hdmirx_tool_runner.sh $temp_log get_audio_info + echo + expected_values="24 bits, Channel Number [2], 48.0 kHz" + hdmirx_output_checker.sh $temp_log verify_get_audio_info_output "${expected_values}" +_summary: Check the event by hdmi_rx tool while plugging the HDMI cable to HDMI RX port +_description: + This test checks some audio information from hdmi_rx tool should match the expectation. + It expectes '24 bits' in 'Audio Bits' section , 'Channel Number [2]' in 'Audio Channel Info' section and + '48.0 kHz' in 'Audio Sample Freq' section once the HDMI RX port be connected with a Host or Player such as Laptop through HDMI cable. 
+ +id: genio-hdmi-rx/disable-hdmi +plugin: shell +category_id: genio-hdmi-rx +estimated_duration: 5 +depends: + genio-hdmi-rx/module-detect + genio-hdmi-rx/check-hdmi-cable-connection +imports: + from com.canonical.plainbox import manifest + from com.canonical.certification import executable +requires: + manifest.has_hdmi_rx == "True" + executable.name == "genio-test-tool.hdmi-rx-tool" +flags: also-after-suspend +user: root +command: + temp_log=$( mktemp ) + hdmirx_tool_runner.sh $temp_log disable_hdmi + hdmirx_tool_runner.sh $temp_log check_cable + echo + hdmirx_output_checker.sh $temp_log verify_check_cable_output "hdmi disconnected" +_summary: Disable the HDMI RX functionaliy by hdmi_rx tool +_description: + This test will disable the functionaliy of HDMI RX by using hdmi_rx tool and the HDMI RX port still be connected. + +id: genio-hdmi-rx/enable-hdmi +plugin: shell +category_id: genio-hdmi-rx +estimated_duration: 5 +depends: + genio-hdmi-rx/module-detect + genio-hdmi-rx/disable-hdmi +imports: + from com.canonical.plainbox import manifest + from com.canonical.certification import executable +requires: + manifest.has_hdmi_rx == "True" + executable.name == "genio-test-tool.hdmi-rx-tool" +flags: also-after-suspend +user: root +command: + temp_log=$( mktemp ) + hdmirx_tool_runner.sh $temp_log enable_hdmi + hdmirx_tool_runner.sh $temp_log check_cable + echo + hdmirx_output_checker.sh $temp_log verify_check_cable_output "hdmi connected" + # Verify Video info + temp_log=$( mktemp ) + hdmirx_tool_runner.sh $temp_log check_video_locked + echo + hdmirx_output_checker.sh $temp_log verify_check_video_locked_output "video locked" + temp_log=$( mktemp ) + hdmirx_tool_runner.sh $temp_log get_video_info + echo + expected_values="1920 1080 60" + hdmirx_output_checker.sh $temp_log verify_get_video_info_output "${expected_values}" + # Verify Audio info + temp_log=$( mktemp ) + hdmirx_tool_runner.sh $temp_log check_audio_locked + echo + hdmirx_output_checker.sh $temp_log 
verify_check_audio_locked_output "video locked" + temp_log=$( mktemp ) + hdmirx_tool_runner.sh $temp_log get_audio_info + echo + expected_values="24 bits, Channel Number [2], 48.0 kHz" + hdmirx_output_checker.sh $temp_log verify_get_audio_info_output "${expected_values}" +_summary: Enable the HDMI RX functionaliy by hdmi_rx tool +_description: + This test will enable the functionaliy of HDMI RX by using hdmi_rx tool and the HDMI RX port still be connected. + +id: genio-hdmi-rx/unplug-event-detect +plugin: user-interact +category_id: genio-hdmi-rx +estimated_duration: 20 +depends: genio-hdmi-rx/module-detect +imports: + from com.canonical.plainbox import manifest + from com.canonical.certification import executable +requires: + manifest.has_hdmi_rx == "True" + executable.name == "genio-test-tool.hdmi-rx-tool" +flags: also-after-suspend +user: root +command: + EXIT=0 + temp_log=$( mktemp ) + hdmirx_tool_runner.sh $temp_log start_observing + echo + expected_events='HDMI_RX_AUD_UNLOCK HDMI_RX_TIMING_UNLOCK HDMI_RX_PWR_5V_CHANGE HDMI_RX_PLUG_OUT' + for event in $expected_events + do + echo "Checking event: $event ..." + if [ "$(grep -w $event $temp_log)" == "" ]; then + echo "- FAIL: Unable to find the '$event' event" + EXIT=1 + else + echo "- PASS" + fi + done + exit $EXIT +_summary: Check the event by hdmi_rx tool while removing the HDMI cable from HDMI RX port +_purpose: + This test checks the HDMI_RX_AUD_UNLOCK, HDMI_RX_TIMING_UNLOCK, HDMI_RX_PWR_5V_CHANGE and HDMI_RX_PLUG_OUT events can be detected + while removing the HDMI cable from HDMI RX port. +_steps: + 1. Plug the HDMI cable to HDMI RX port if it's not connected yet + 2. Press "Enter" to perform event detection + 3. 
You will have 15 seconds to remove the HDMI cable from HDMI RX port + diff --git a/contrib/genio/units/hdmi-rx/manifest.pxu b/contrib/genio/units/hdmi-rx/manifest.pxu new file mode 100644 index 0000000000..aeb551722e --- /dev/null +++ b/contrib/genio/units/hdmi-rx/manifest.pxu @@ -0,0 +1,5 @@ +unit: manifest entry +id: has_hdmi_rx +_name: HDMI RX (Input) +_prompt: Does this machine support the HDMI RX feature? +value-type: bool diff --git a/contrib/genio/units/hdmi-rx/test-plan.pxu b/contrib/genio/units/hdmi-rx/test-plan.pxu new file mode 100644 index 0000000000..450d31fc56 --- /dev/null +++ b/contrib/genio/units/hdmi-rx/test-plan.pxu @@ -0,0 +1,56 @@ +id: genio-hdmi-rx-full +unit: test plan +_name: genio HDMI RX tests +_description: Full genio HDMI RX tests for devices +include: +nested_part: + genio-hdmi-rx-manual + genio-hdmi-rx-automated + +id: genio-hdmi-rx-manual +unit: test plan +_name: genio HDMI RX manual tests +_description: Manual genio HDMI RX tests for devices +include: + genio-hdmi-rx/module-detect + genio-hdmi-rx/plug-event-detect + genio-hdmi-rx/check-hdmi-cable-connection + genio-hdmi-rx/check-video-info + genio-hdmi-rx/check-audio-info + genio-hdmi-rx/disable-hdmi + genio-hdmi-rx/enable-hdmi + genio-hdmi-rx/unplug-event-detect + +id: genio-hdmi-rx-automated +unit: test plan +_name: genio HDMI RX auto tests +_description: Automated genio HDMI RX tests for devices +include: + +id: after-suspend-genio-hdmi-rx-full +unit: test plan +_name: genio HDMI RX tests (after suspend) +_description: Full after suspend genio HDMI RX tests for devices +include: +nested_part: + after-suspend-genio-hdmi-rx-manual + after-suspend-genio-hdmi-rx-automated + +id: after-suspend-genio-hdmi-rx-manual +unit: test plan +_name: genio HDMI RX manual tests (after suspend) +_description: Manual after suspend genio HDMI RX tests for devices +include: + after-suspend-genio-hdmi-rx/plug-event-detect + after-suspend-genio-hdmi-rx/check-hdmi-cable-connection + 
after-suspend-genio-hdmi-rx/check-video-info + after-suspend-genio-hdmi-rx/check-audio-info + after-suspend-genio-hdmi-rx/disable-hdmi + after-suspend-genio-hdmi-rx/enable-hdmi + after-suspend-genio-hdmi-rx/unplug-event-detect + +id: after-suspend-genio-hdmi-rx-automated +unit: test plan +_name: genio HDMI RX auto tests (after suspend) +_description: Automated after suspend genio HDMI RX tests for devices +include: diff --git a/contrib/genio/units/info/category.pxu b/contrib/genio/units/info/category.pxu new file mode 100644 index 0000000000..05b6fb6ffd --- /dev/null +++ b/contrib/genio/units/info/category.pxu @@ -0,0 +1,3 @@ +unit: category +id: genio-info +_name: Genio Information tests diff --git a/contrib/genio/units/info/jobs.pxu b/contrib/genio/units/info/jobs.pxu new file mode 100644 index 0000000000..23be76a50e --- /dev/null +++ b/contrib/genio/units/info/jobs.pxu @@ -0,0 +1,8 @@ +id: genio-info/fw_printenv +category_id: genio-info +plugin: shell +user: root +estimated_duration: 0.5 +_summary: Dump u-boot environment variables from OS via command fw_printenv +_description: Dump u-boot environment variables +command: fw_printenv diff --git a/contrib/genio/units/info/test-plan.pxu b/contrib/genio/units/info/test-plan.pxu new file mode 100644 index 0000000000..1e25a0eb3f --- /dev/null +++ b/contrib/genio/units/info/test-plan.pxu @@ -0,0 +1,39 @@ +id: genio-info-full +unit: test plan +_name: Genio Information tests +_description: Full Genio information tests for devices +include: +nested_part: + genio-info-manual + genio-info-automated + +id: genio-info-manual +unit: test plan +_name: Genio Information manual tests +_description: Manual Genio information tests for devices +include: + +id: genio-info-automated +unit: test plan +_name: Genio Information auto tests +_description: Automated Genio information tests for devices +include: + genio-info/fw_printenv + +id: after-suspend-genio-info-full +unit: test plan +_name: Genio Information tests (after suspend) 
+_description: Full after suspend Genio information tests for devices +include: + +id: after-suspend-genio-info-manual +unit: test plan +_name: Genio Information manual tests (after suspend) +_description: Manual after suspend Genio information tests for devices +include: + +id: after-suspend-genio-info-automated +unit: test plan +_name: Genio Information auto tests (after suspend) +_description: Automated after suspend Genio information tests for devices +include: diff --git a/contrib/genio/units/peripheral/category.pxu b/contrib/genio/units/peripheral/category.pxu new file mode 100644 index 0000000000..3b0ef12d27 --- /dev/null +++ b/contrib/genio/units/peripheral/category.pxu @@ -0,0 +1,3 @@ +unit: category +id: genio-peripheral +_name: Genio Peripheral tests diff --git a/contrib/genio/units/peripheral/jobs.pxu b/contrib/genio/units/peripheral/jobs.pxu new file mode 100644 index 0000000000..64dfc05687 --- /dev/null +++ b/contrib/genio/units/peripheral/jobs.pxu @@ -0,0 +1,197 @@ +id: gpio_loopback_pin_mapping +_summary: Generates a GPIO loopback pin mapping for GPIO loopback test +_description: + A GPIO loopback pin mapping. By giving a pair of GPIO pin on machnie to generates test jobs. + Usage of parameter: + GPIO_LOOPBACK_PIN_MAPPING=output_gpio_chip_number:physical_output_port:gpio_output_pin:input_gpio_chip_number:physical_input_port:gpio_input_pin ... + e.g. 
GPIO_LOOPBACK_PIN_MAPPING=0:J1:21:0:J3:134 1:3:68:2:4:69 +estimated_duration: 0.02 +category_id: genio-peripheral +plugin: resource +environ: GPIO_LOOPBACK_PIN_MAPPING +command: + awk '{ + split($0, record, " ") + for (i in record) { + split(record[i], data, ":") + printf "OUTPUT_GPIO_CHIP_NUMBER: %s\nPHYSICAL_OUTPUT_PORT: %s\nGPIO_OUTPUT_PIN: %s\nINPUT_GPIO_CHIP_NUMBER: %s\nPHYSICAL_INPUT_PORT: %s\nGPIO_INPUT_PIN: %s\n", data[1], data[2], data[3], data[4], data[5], data[6] + } + }' <<< "$GPIO_LOOPBACK_PIN_MAPPING" + +id: genio-peripheral/gpio-loopback-on-phycial-output-{{PHYSICAL_OUTPUT_PORT}}-input-{{PHYSICAL_INPUT_PORT}} +unit: template +template-resource: gpio_loopback_pin_mapping +template-unit: job +template-engine: jinja2 +_summary: GPIO loopback test on physical port {{PHYSICAL_OUTPUT_PORT}} and {{PHYSICAL_INPUT_PORT}} +_description: + GPIO loopback test between physical output port {{PHYSICAL_INPUT_PORT}} + and physical input port {{PHYSICAL_INPUT_PORT}} on outpu GPIO Chip gpiochip{{OUTPUT_GPIO_CHIP_NUMBER}} and input GPIO Chip gpiochip{{INPUT_GPIO_CHIP_NUMBER}} +plugin: shell +user: root +category_id: genio-peripheral +estimated_duration: 5s +imports: from com.canonical.plainbox import manifest +environ: GENIO_DEVICE +requires: + manifest.has_genio_gpio_lookback == "True" + {%- if __checkbox_env__.get("GENIO_DEVICE") == "G1200-evk" %} + manifest.has_genio_g1200_j34_short == "False" + {%- endif %} +flags: also-after-suspend +command: + echo "## Perform the GPIO loopback test" + gpio_loopback_test.py -oc {{OUTPUT_GPIO_CHIP_NUMBER}} -ic {{INPUT_GPIO_CHIP_NUMBER}} -po {{PHYSICAL_OUTPUT_PORT}} -go {{GPIO_OUTPUT_PIN}} -pi {{PHYSICAL_INPUT_PORT}} -gi {{GPIO_INPUT_PIN}} + +id: genio-peripheral/spi-test +template-engine: jinja2 +category_id: genio-peripheral +estimated_duration: 2s +plugin: shell +flags: also-after-suspend +user: root +imports: from com.canonical.plainbox import manifest +environ: GENIO_DEVICE +requires: + manifest.has_genio_spi_lookback == "True" 
+ {%- if __checkbox_env__.get("GENIO_DEVICE") == "G1200-evk" %} + manifest.has_genio_g1200_j34_short == "False" + {%- endif %} +_summary: Test the SPI bus functionality on Raspberry Pi Hat +_purpose: + Check the SPI devices exist and can be used via spidev_test tool +_description: + This test will check whether the SPI devices path exists, + and send a test_string with spidev_test tool to check whether the + test_string will be received via the loopback function. + Please connect the SPI0_MO (Pin#19) & SPI0_MI (Pin#21) pins with + jumper or wire on Raspberry Pi Hat. +command: + spidev_test.py "$GENIO_DEVICE" + +id: genio-peripheral/uart1-rpi-hat-loopback-{SoC} +unit: template +template-resource: genio_device_soc +template-filter: genio_device_soc.SoC == "mt8395" +template-unit: job +category_id: genio-peripheral +estimated_duration: 2s +plugin: shell +flags: also-after-suspend +user: root +imports: from com.canonical.plainbox import manifest +requires: + manifest.has_genio_uart_lookback == "True" + manifest.has_genio_g1200_j34_short == "False" +_summary: Test UART1 functionality on Raspberry Pi Hat +_description: + This test will check the UART1 functionality on Raspberry Pi Hat by connecting UTXD (Pin#8) and URXD (Pin#10) with jumper or wire. + Please make sure there's no jumper or wire connected on J43 (not short) +command: + serialcheck.py {SoC} + +id: genio-peripheral/uart1-j52-loopback-{SoC} +unit: template +template-resource: genio_device_soc +template-filter: genio_device_soc.SoC == "mt8395" +template-unit: job +category_id: genio-peripheral +estimated_duration: 2s +plugin: shell +flags: also-after-suspend +user: root +imports: from com.canonical.plainbox import manifest +requires: + manifest.has_genio_g1200_j34_short == "True" + manifest.has_genio_g1200_j52_txd_rxd_connection == "True" +_summary: Test UART1 functionality on J52 +_description: + This test will check the UART1 functionality on J52 by connecting TXD and RXD with jumper or wire. 
+ Please make sure there's a jumper or wire connected on J43 (short) +command: + serialcheck.py {SoC} + +id: genio-peripheral/uart1-micro-b-{SoC} +unit: template +template-resource: genio_device_soc +template-filter: genio_device_soc.SoC in ["mt8390", "mt8365"] +template-unit: job +category_id: genio-peripheral +estimated_duration: 2s +plugin: manual +flags: also-after-suspend +_summary: Test the functionality of UART1 port by sending and receiving data +_purpose: + To test if the data can be received from Host to this DUT via Micro-B UART1 port. +_steps: + 1. Connect the DUT's micro-b UART1 to Host + 2. On DUT, create a random data called binary which size is 4K for test + $ dd if=/dev/urandom of=binary count=1 bs=4096 + 3. Copy the random data to Host + 4. On DUT, run the following command to be a receiver + $ serialcheck -d /dev/ttyS1 -f binary -m r -l 5 -b 921600 + 5. On Host, run the following command to be a sender + $ serialcheck -d /dev/ttyUSBx -f binary -m t -l 5 -b 921600 + - Replace ttyUSBx with the proper usb port number according to the detected one (ttyUSB0, ttyUSB1, ttyUSB2, ...) + 6. Repeat the step 4 and 5 with the available baud + - Available baud: 9600 115200 576000 921600 +_verification: + On DUT, the rx data amount should 20480 like the following sample. 
+    `cts: 0 dsr: 0 rng: 0 dcd: 0 rx: 20480 tx: 0 frame 0 ovr 0 par: 0 brk: 0 buf_ovrr: 0`
+
+id: genio-peripheral/uart2-rpi-hat-loopback-{{SoC}}
+unit: template
+template-resource: genio_device_soc
+template-filter: genio_device_soc.SoC in ["mt8390", "mt8365"]
+template-unit: job
+template-engine: jinja2
+category_id: genio-peripheral
+estimated_duration: 2s
+plugin: shell
+flags: also-after-suspend
+user: root
+environ: GENIO_DEVICE
+imports: from com.canonical.plainbox import manifest
+requires:
+    manifest.has_genio_uart_lookback == "True"
+    {%- if __checkbox_env__.get("GENIO_DEVICE") == "G700" %}
+    manifest.has_genio_g700_j4202_as_rpi_mode == "True" and manifest.has_genio_g700_j4203_as_rpi_mode == "True"
+    {%- endif %}
+_summary: Test UART2 functionality on RPI hat
+_description:
+    This test will check the UART2 functionality on RPI hat by connecting
+    {%- if __checkbox_env__.get("GENIO_DEVICE") == "G700" %} UTXD2 (Pin#8) and URXD2 (Pin#10){%- else %} TXD2 (Pin#8) and RXD2 (Pin#10){%- endif %} with jumper or wire.
+command:
+    serialcheck.py {{SoC}}
+
+id: genio-peripheral/uart2-micro-b-{SoC}
+unit: template
+template-resource: genio_device_soc
+template-filter: genio_device_soc.SoC == "mt8390"
+template-unit: job
+category_id: genio-peripheral
+estimated_duration: 2s
+plugin: manual
+flags: also-after-suspend
+imports: from com.canonical.plainbox import manifest
+requires:
+    manifest.has_genio_g700_j4202_as_rpi_mode == "False"
+    manifest.has_genio_g700_j4203_as_rpi_mode == "False"
+_summary: Test the functionality of Micro-B UART2 port by sending and receiving data
+_purpose:
+    To test if the data can be received from Host to this DUT via Micro-B UART2 port.
+_steps:
+    1. Connect the DUT's Micro-B UART2 to Host via Micro-B to USB cable
+    2. On DUT, create a random data called binary which size is 4K
+    $ dd if=/dev/urandom of=binary count=1 bs=4096
+    3. Copy the random data to Host
+    4. 
On DUT, run the following command to be a receiver + $ serialcheck -d /dev/ttyS1 -f binary -m r -l 5 -b 921600 + 5. On Host, run the following command to be a sender + $ serialcheck -d /dev/ttyUSBx -f binary -m t -l 5 -b 921600 + - Replace ttyUSBx with the proper usb port number according to the detected one (ttyUSB0, ttyUSB1, ttyUSB2, ...) + 6. Repeat the step 4 and 5 with the available baud + - Available baud: 9600 115200 576000 921600 +_verification: + On DUT, the rx data amount should 20480 like the following sample. + `cts: 0 dsr: 0 rng: 0 dcd: 0 rx: 20480 tx: 0 frame 0 ovr 0 par: 0 brk: 0 buf_ovrr: 0` \ No newline at end of file diff --git a/contrib/genio/units/peripheral/manifest.pxu b/contrib/genio/units/peripheral/manifest.pxu new file mode 100644 index 0000000000..be49a57c19 --- /dev/null +++ b/contrib/genio/units/peripheral/manifest.pxu @@ -0,0 +1,41 @@ +unit: manifest entry +id: has_genio_gpio_lookback +_prompt: Does this device have the following on Raspberry PI Hat?: +_name: GPIO Loopback Connector +value-type: bool + +unit: manifest entry +id: has_genio_spi_lookback +_prompt: Does this device have the following on Raspberry PI Hat?: +_name: SPI Loopback Connector +value-type: bool + +unit: manifest entry +id: has_genio_uart_lookback +_prompt: Does this device have the following on Raspberry PI Hat?: +_name: UART Loopback Connector +value-type: bool + +unit: manifest entry +id: has_genio_g1200_j34_short +_prompt: G1200-evk Platform Specific: +_name: Is there a jumper or wire connected on J43 (short J43)? +value-type: bool + +unit: manifest entry +id: has_genio_g1200_j52_txd_rxd_connection +_prompt: G1200-evk Platform Specific: +_name: Is there a jumper or wire connected TXD and RXD on J52? +value-type: bool + +unit: manifest entry +id: has_genio_g700_j4202_as_rpi_mode +_prompt: G700 Platform Specific: +_name: Is J4202 configured as Raspberry PI mode? 
+value-type: bool + +unit: manifest entry +id: has_genio_g700_j4203_as_rpi_mode +_prompt: G700 Platform Specific: +_name: Is J4203 configured as Raspberry PI mode? +value-type: bool diff --git a/contrib/genio/units/peripheral/test-plan.pxu b/contrib/genio/units/peripheral/test-plan.pxu new file mode 100644 index 0000000000..0ba6450bd4 --- /dev/null +++ b/contrib/genio/units/peripheral/test-plan.pxu @@ -0,0 +1,57 @@ +id: genio-peripheral-full +unit: test plan +_name: Genio Peripheral tests +_description: Full Genio Peripheral tests for devices +include: +nested_part: + genio-peripheral-manual + genio-peripheral-automated + +id: genio-peripheral-manual +unit: test plan +_name: Genio Peripheral manual tests +_description: Manual Genio Peripheral tests for devices +include: + genio-peripheral/uart1-micro-b-.* + genio-peripheral/uart2-micro-b-.* + +id: genio-peripheral-automated +unit: test plan +_name: Genio Peripheral auto tests +_description: Automated Genio Peripheral tests for devices +bootstrap_include: + gpio_loopback_pin_mapping +include: + genio-peripheral/spi-test + genio-peripheral/gpio-loopback-on-phycial-output-.*-input-.* + genio-peripheral/uart1-rpi-hat-loopback-.* + genio-peripheral/uart1-j52-loopback-.* + genio-peripheral/uart2-rpi-hat-loopback-.* + +id: after-suspend-genio-peripheral-full +unit: test plan +_name: Genio Peripheral tests (after suspend) +_description: Full after suspend Genio Peripheral tests for devices +include: +nested_part: + +id: after-suspend-genio-peripheral-manual +unit: test plan +_name: Genio Peripheral manual tests (after suspend) +_description: Manual after suspend Genio Peripheral tests for devices +include: + after-suspend-genio-peripheral/uart1-micro-b-.* + after-suspend-genio-peripheral/uart2-micro-b-.* + +id: after-suspend-genio-peripheral-automated +unit: test plan +_name: Genio Peripheral auto tests (after suspend) +_description: Automated after suspend Genio Peripheral tests for devices +bootstrap_include: + 
gpio_loopback_pin_mapping +include: + after-suspend-genio-peripheral/spi-test + after-suspend-genio-peripheral/gpio-loopback-on-phycial-output-.*-input-.* + after-suspend-genio-peripheral/uart1-rpi-hat-loopback-.* + after-suspend-genio-peripheral/uart1-j52-loopback-.* + after-suspend-genio-peripheral/uart2-rpi-hat-loopback-.* diff --git a/contrib/genio/units/power-management/category.pxu b/contrib/genio/units/power-management/category.pxu new file mode 100644 index 0000000000..e8d6c34f8f --- /dev/null +++ b/contrib/genio/units/power-management/category.pxu @@ -0,0 +1,3 @@ +unit: category +id: genio-power-management +_name: Genio Power Management tests diff --git a/contrib/genio/units/power-management/jobs.pxu b/contrib/genio/units/power-management/jobs.pxu new file mode 100644 index 0000000000..f50ba85704 --- /dev/null +++ b/contrib/genio/units/power-management/jobs.pxu @@ -0,0 +1,162 @@ +id: genio-power-management/pmic-irq +unit: job +category_id: genio-power-management +plugin: manual +estimated_duration: 60 +_summary: Check the device can be shutdown by pressing the power button +_purpose: + Check the device can be shut down by pressing the power button +_steps: + 1. Power the device off by pressing the power button +_verification: + 1. Did the device shutdown successfully? 
+ +id: genio-power-management/dvfs-gpufreq-{SoC} +unit: template +template-resource: genio_device_soc +template-unit: job +category_id: genio-power-management +plugin: shell +flags: also-after-suspend +estimated_duration: 1 +_summary: Check the GPU governors match expectation on SoC {SoC} +command: + dvfs_gpu_check_governors.py {SoC} + +id: genio-power-management/clock-manager-{SoC} +unit: template +template-resource: genio_device_soc +template-unit: job +category_id: genio-power-management +requires: genio_device_soc.SoC in ['mt8390','mt8395'] +plugin: shell +user: root +flags: also-after-suspend +estimated_duration: 1 +_summary: Check the Clock Manager (Linux CCF) matchs expectation on SoC {SoC} +command: + linux_ccf.py {SoC} + +id: genio-power-management/pmic-regulator-{SoC} +unit: template +template-resource: genio_device_soc +template-unit: job +category_id: genio-power-management +plugin: shell +user: root +flags: also-after-suspend +estimated_duration: 1 +_summary: Check all regulators' name match expectation on SoC {SoC} +command: + pmic_regulator.py {SoC} + +id: genio-power-management/cpu-idle-wfi-{SoC} +unit: template +template-resource: genio_device_soc +template-unit: job +category_id: genio-power-management +plugin: shell +user: root +flags: also-after-suspend +estimated_duration: 1 +_summary: Check specific CPU node is WFI and is enabled on SoC {SoC} +command: + cpu_idle.py {SoC} -c 'wfi' + +id: genio-power-management/cpu-idle-mcdi-cpu-{SoC} +unit: template +template-resource: genio_device_soc +template-unit: job +category_id: genio-power-management +plugin: shell +user: root +requires: genio_device_soc.SoC == 'mt8365' +flags: also-after-suspend +estimated_duration: 1 +_summary: Check specific CPU is mcdi-cpu and is enabled on SoC {SoC} +command: + cpu_idle.py {SoC} -c 'mcdi-cpu' + +id: genio-power-management/cpu-idle-mcdi-cluster-{SoC} +unit: template +template-resource: genio_device_soc +template-unit: job +category_id: genio-power-management 
+plugin: shell +user: root +requires: genio_device_soc.SoC == 'mt8365' +flags: also-after-suspend +estimated_duration: 1 +_summary: Check specific CPU is mcdi-cluster and is enabled on SoC {SoC} +command: + cpu_idle.py {SoC} -c 'mcdi-cluster' + +id: genio-power-management/cpu-idle-dpidle-{SoC} +unit: template +template-resource: genio_device_soc +template-unit: job +category_id: genio-power-management +plugin: shell +user: root +requires: genio_device_soc.SoC == 'mt8365' +flags: also-after-suspend +estimated_duration: 1 +_summary: Check specific CPU is dpidle and is enabled on SoC {SoC} +command: + cpu_idle.py {SoC} -c 'dpidle' + +id: genio-power-management/cpu-idle-clusteroff-l-{SoC} +unit: template +template-resource: genio_device_soc +template-unit: job +category_id: genio-power-management +plugin: shell +user: root +requires: genio_device_soc.SoC in ['mt8390','mt8395'] +flags: also-after-suspend +estimated_duration: 1 +_summary: Check specific CPU is clusteroff-l and is enabled on SoC {SoC} +command: + cpu_idle.py {SoC} -c 'clusteroff-l' + +id: genio-power-management/cpu-idle-clusteroff-b-{SoC} +unit: template +template-resource: genio_device_soc +template-unit: job +category_id: genio-power-management +plugin: shell +user: root +requires: genio_device_soc.SoC in ['mt8390','mt8395'] +flags: also-after-suspend +estimated_duration: 1 +_summary: Check specific CPU is clusteroff-b and is enabled on SoC {SoC} +command: + cpu_idle.py {SoC} -c 'clusteroff-b' + +id: genio-power-management/cpu-idle-cpuoff-l-{SoC} +unit: template +template-resource: genio_device_soc +template-unit: job +category_id: genio-power-management +plugin: shell +user: root +requires: genio_device_soc.SoC in ['mt8390','mt8395'] +flags: also-after-suspend +estimated_duration: 1 +_summary: Check specific CPU is cpuoff-l and is enabled on SoC {SoC} +command: + cpu_idle.py {SoC} -c 'cpuoff-l' + +id: genio-power-management/cpu-idle-cpuoff-b-{SoC} +unit: template +template-resource: genio_device_soc 
+template-unit: job +category_id: genio-power-management +plugin: shell +user: root +requires: genio_device_soc.SoC in ['mt8390','mt8395'] +flags: also-after-suspend +estimated_duration: 1 +_summary: Check specific CPU is cpuoff-b and is enabled on SoC {SoC} +command: + cpu_idle.py {SoC} -c 'cpuoff-b' diff --git a/contrib/genio/units/power-management/test-plan.pxu b/contrib/genio/units/power-management/test-plan.pxu new file mode 100644 index 0000000000..e1366b7983 --- /dev/null +++ b/contrib/genio/units/power-management/test-plan.pxu @@ -0,0 +1,57 @@ +id: genio-power-management-full +unit: test plan +_name: Genio Power Management test +_description: Full power management tests for G1200-evk, G700 and G350 platforms +include: +nested_part: + genio-power-management-manual + genio-power-management-automated + after-suspend-genio-power-management-manual + after-suspend-genio-power-management-automated + +id: genio-power-management-manual +unit: test plan +_name: Genio Manual Power Management test +_description: Manual power management test for G1200-evk, G700 and G350 platforms +include: + genio-power-management/pmic-irq + +id: genio-power-management-automated +unit: test plan +_name: Genio Auto Power Management test +_description: Automated power management test for G1200-evk, G700 and G350 platforms +include: + genio-power-management/dvfs-gpufreq-.* + genio-power-management/clock-manager-.* + genio-power-management/pmic-regulator-.* + genio-power-management/cpu-idle-wfi-.* + genio-power-management/cpu-idle-mcdi-cpu-.* + genio-power-management/cpu-idle-mcdi-cluster-.* + genio-power-management/cpu-idle-dpidle-.* + genio-power-management/cpu-idle-clusteroff-l-.* + genio-power-management/cpu-idle-clusteroff-b-.* + genio-power-management/cpu-idle-cpuoff-l-.* + genio-power-management/cpu-idle-cpuoff-b-.* + +id: after-suspend-genio-power-management-manual +unit: test plan +_name: Genio Manual power management test +_description: Manual after suspend power management test 
 for G1200-evk, G700 and G350 platforms
+include:
+
+id: after-suspend-genio-power-management-automated
+unit: test plan
+_name: Genio Auto Power Management test
+_description: Automated after suspend power management test for G1200-evk, G700 and G350 platforms
+include:
+    after-suspend-genio-power-management/dvfs-gpufreq-.*
+    after-suspend-genio-power-management/clock-manager-.*
+    after-suspend-genio-power-management/pmic-regulator-.*
+    after-suspend-genio-power-management/cpu-idle-wfi-.*
+    after-suspend-genio-power-management/cpu-idle-mcdi-cpu-.*
+    after-suspend-genio-power-management/cpu-idle-mcdi-cluster-.*
+    after-suspend-genio-power-management/cpu-idle-dpidle-.*
+    after-suspend-genio-power-management/cpu-idle-clusteroff-l-.*
+    after-suspend-genio-power-management/cpu-idle-clusteroff-b-.*
+    after-suspend-genio-power-management/cpu-idle-cpuoff-l-.*
+    after-suspend-genio-power-management/cpu-idle-cpuoff-b-.*
diff --git a/contrib/genio/units/thermal/category.pxu b/contrib/genio/units/thermal/category.pxu
new file mode 100644
index 0000000000..f1fe6cc8cd
--- /dev/null
+++ b/contrib/genio/units/thermal/category.pxu
@@ -0,0 +1,3 @@
+unit: category
+id: genio-thermal
+_name: Genio Thermal tests
diff --git a/contrib/genio/units/thermal/jobs.pxu b/contrib/genio/units/thermal/jobs.pxu
new file mode 100644
index 0000000000..8fa3ebdb4b
--- /dev/null
+++ b/contrib/genio/units/thermal/jobs.pxu
@@ -0,0 +1,67 @@
+id: genio-thermal/simulate-overheating
+_summary: Test that the thermal protection shuts the system down when overheated
+_description: Simulate the overheating to shut the system down automatically. 
System will power on automatically after waiting 1 minute
+category_id: genio-thermal
+flags: preserve-locale noreturn autorestart also-after-suspend
+user: root
+plugin: shell
+estimated_duration: 60
+environ: PLAINBOX_SESSION_SHARE
+command:
+    # Clean up
+    if [ -f "$PLAINBOX_SESSION_SHARE/thermal-is-malfunction" ]; then
+        rm "$PLAINBOX_SESSION_SHARE/thermal-is-malfunction"
+    fi
+    # Feed the super high temperature to the emul_temp of thermal_zone0
+    # so that thermal protection shuts the system down; rtcwake will wake the system up.
+    rtcwake -v -m no -s 60
+    echo 999999 > /sys/class/thermal/thermal_zone0/emul_temp
+    # If the thermal protection mechanism doesn't work, reboot the system
+    # to prevent Checkbox from getting stuck here due to the noreturn flag
+    sleep 10
+    touch "${PLAINBOX_SESSION_SHARE}/thermal-is-malfunction"
+    reboot
+
+id: genio-thermal/post-simulate-overheating
+depends: genio-thermal/simulate-overheating
+category_id: genio-thermal
+_summary: Post simulate-overheating check
+_description: Check the thermal protection log after simulate-overheating
+user: root
+plugin: shell
+estimated_duration: 1.0
+flags: also-after-suspend
+command:
+    # Thermal is malfunctioning
+    if [ -f "$PLAINBOX_SESSION_SHARE/thermal-is-malfunction" ]; then
+        echo "ERROR: found the $PLAINBOX_SESSION_SHARE/thermal-is-malfunction file exists"
+        echo "FAIL: thermal is malfunction, it cannot shut system down when overheating"
+        exit 1
+    fi
+    # Thermal is functional, dump the journal of previous boot
+    temp_log=$( mktemp )
+    journalctl -b -1 | tee $temp_log
+    golden_message="reboot: HARDWARE PROTECTION shutdown (Temperature too high)"
+    if ! 
grep -qw "$golden_message" "$temp_log" ; then + echo "FAIL: no '$golden_message' message be found in journal" + exit 1 + fi + echo "PASS: found '$golden_message' in journal log" + + +id: genio-thermal/thermal +_summary: Check Thermal sensor +plugin: shell +category_id: genio-thermal +user: root +estimated_duration: 30 +flags: preserve-locale also-after-suspend +command: + echo "Starting stress-ng in 10s" + sleep 10 + cur=$(cat /sys/class/thermal/thermal_zone0/temp) + printf "Before stress: %0.2f°C\n" "$(bc -l <<< "$cur/1000")" + stress-ng --cpu 8 --io 4 --vm 2 --vm-bytes 128M --fork 4 --timeout 20s + new=$(cat /sys/class/thermal/thermal_zone0/temp) + printf "After stress: %0.2f°C\n" "$(bc -l <<< "$new/1000")" + [[ $new -gt $cur ]] \ No newline at end of file diff --git a/contrib/genio/units/thermal/test-plan.pxu b/contrib/genio/units/thermal/test-plan.pxu new file mode 100644 index 0000000000..f39819387a --- /dev/null +++ b/contrib/genio/units/thermal/test-plan.pxu @@ -0,0 +1,45 @@ +id: baoshan-thermal-full +unit: test plan +_name: Baoshan Thermal tests +_description: Full Baoshan Thermal tests for devices +include: +nested_part: + baoshan-thermal-manual + baoshan-thermal-automated + +id: baoshan-thermal-manual +unit: test plan +_name: Baoshan Thermal manual tests +_description: Manual Baoshan Thermal tests for devices +include: + +id: baoshan-thermal-automated +unit: test plan +_name: Baoshan Thermal auto tests +_description: Automated Baoshan Thermal tests for devices +include: + baoshan-thermal/thermal + baoshan-thermal/simulate-overheating + baoshan-thermal/post-simulate-overheating + +id: after-suspend-baoshan-thermal-full +unit: test plan +_name: Baoshan Thermal tests (after suspend) +_description: Full after suspend Baoshan Thermal tests for devices +include: +nested_part: + +id: after-suspend-baoshan-thermal-manual +unit: test plan +_name: Baoshan Thermal manual tests (after suspend) +_description: Manual after suspend Baoshan Thermal tests for devices 
+include: + +id: after-suspend-baoshan-thermal-automated +unit: test plan +_name: Baoshan Thermal auto tests (after suspend) +_description: Automated after suspend Baoshan Thermal tests for devices +include: + after-suspend-baoshan-thermal/thermal + after-suspend-baoshan-thermal/simulate-overheating + after-suspend-baoshan-thermal/post-simulate-overheating From d6e050e0e36a8703399815cc848cc6a0a7ca3d84 Mon Sep 17 00:00:00 2001 From: Fernando Bravo <39527354+fernando79513@users.noreply.github.com> Date: Fri, 8 Mar 2024 16:31:57 +0100 Subject: [PATCH 069/108] Fix dkms build validation in mantic (BugFix) (#1039) * Moved dkms_build_validation to python script * Refactored dkms test * added dkms python test to sru.pxu * Finished dkms validation implementation and added tests * Replaced prints for logs * Fixed some of the PR comments * Decomposition of main branches * Fixed bad command and a typing * Added context for error lines * # We will capture stdout and stderr in stdout * Fixed tests for run command * Small issues fixed on get_context_lines --- providers/sru/bin/dkms_build_validation | 67 ------ providers/sru/bin/dkms_build_validation.py | 186 +++++++++++++++ .../sru/tests/test_dkms_build_validation.py | 218 ++++++++++++++++++ providers/sru/units/sru.pxu | 2 +- 4 files changed, 405 insertions(+), 68 deletions(-) delete mode 100755 providers/sru/bin/dkms_build_validation create mode 100755 providers/sru/bin/dkms_build_validation.py create mode 100644 providers/sru/tests/test_dkms_build_validation.py diff --git a/providers/sru/bin/dkms_build_validation b/providers/sru/bin/dkms_build_validation deleted file mode 100755 index 24440640c1..0000000000 --- a/providers/sru/bin/dkms_build_validation +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/bash -# Copyright 2017 Canonical Ltd. 
-# Written by: -# Taihsiang Ho (tai271828) -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, -# as published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -ubuntu_release=`lsb_release -r | cut -d ':' -f 2 | xargs` - -if [ $ubuntu_release = '22.04' ]; then - kernel_ver_min=`dkms status | awk -F ', ' {'print $2'} | sort -V | uniq | head -1` - kernel_ver_max=`dkms status | awk -F ', ' {'print $2'} | sort -V | uniq | tail -1` -else - kernel_ver_min=`dkms status | awk -F ', ' {'print $3'} | sort -V | uniq | head -1` - kernel_ver_max=`dkms status | awk -F ', ' {'print $3'} | sort -V | uniq | tail -1` -fi -kernel_ver_current=`uname -r` - -number_dkms_min=`dkms status | grep $kernel_ver_min | grep installed | wc -l` -number_dkms_max=`dkms status | grep $kernel_ver_max | grep installed | wc -l` - -scan_log="/var/log/apt/term.log" - -# kernel_ver_max should be the same as kernel_ver_current -if [ "$kernel_ver_current" != "$kernel_ver_max" ]; then - echo "Current using kernel version does not match the latest built DKMS module." - echo "Your running kernel: $kernel_ver_current" - echo "Latest DKMS module built on kernel: $kernel_ver_max" - echo "Maybe the target DKMS was not built," - echo "or you are not running the latest available kernel." 
- echo - echo "=== DKMS status ===" - dkms status - exit 1 -fi - -# compare the number of dkms modules of min and max kernels -if [ "$number_dkms_min" -ne "$number_dkms_max" ]; then - echo "$number_dkms_min modules for $kernel_ver_min" - echo "$number_dkms_max modules for $kernel_ver_max" - echo "DKMS module number is inconsistent. Some modules may not be built." - echo - echo "=== DKMS status ===" - dkms status -fi - -# scan the APT log during system update -error_message="Bad return status for module build on kernel: $kernel_ver_current" -error_in_log=`grep "$error_message" $scan_log | wc -l` -if [ "$error_in_log" -gt 0 ]; then - echo "Found dkms build error messages in $scan_log" - echo - echo "=== build log ===" - grep "$error_message" $scan_log -A 5 -B 5 - exit 1 -fi - diff --git a/providers/sru/bin/dkms_build_validation.py b/providers/sru/bin/dkms_build_validation.py new file mode 100755 index 0000000000..a1cf4f68e1 --- /dev/null +++ b/providers/sru/bin/dkms_build_validation.py @@ -0,0 +1,186 @@ +#!/usr/bin/env python3 +# +# Copyright 2017-2024 Canonical Ltd. +# Written by: +# Taihsiang Ho (tai271828) +# Fernando Bravo +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, +# as published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +from collections import Counter +import logging +from packaging import version +import re +import subprocess +import sys +import textwrap +from typing import Dict, List, Set + +logger = logging.getLogger("dkms_build_validation") + + +def run_command(command: List[str]) -> str: + """Run a shell command and return its output""" + try: + result = subprocess.check_output( + command, + stderr=subprocess.STDOUT, # We capture stdout and stderr in stdout + universal_newlines=True, + ) + return result.strip() + except subprocess.CalledProcessError as e: + raise SystemExit( + "Command '{0}' failed with exit code {1}:\n{2}".format( + e.cmd, e.returncode, e.stdout + ) + ) + + +def parse_version(ver: str) -> version.Version: + """Parse the version string and return a version object""" + match = re.match(r"(\d+\.\d+\.\d+(-\d+)?)", ver) + if match: + parsed_version = version.parse(match.group(1)) + else: + raise SystemExit("Invalid version string: {0}".format(ver)) + return parsed_version + + +def parse_dkms_status(dkms_status: str, ubuntu_release: str) -> List[Dict]: + """Parse the output of 'dkms status', the result is a list of dictionaries + that contain the kernel version parsed the status for each one. + """ + kernel_info = [] + for line in dkms_status.splitlines(): + details, status = line.split(": ") + if version.parse(ubuntu_release) >= version.parse("22.04"): + kernel_ver = details.split(", ")[1] + else: + kernel_ver = details.split(", ")[2] + kernel_info.append({"version": kernel_ver, "status": status}) + + sorted_kernel_info = sorted( + kernel_info, key=lambda x: parse_version(x["version"]) + ) + return sorted_kernel_info + + +def check_kernel_version( + kernel_ver_current: str, sorted_kernel_info: List[Dict], dkms_status: str +) -> int: + kernel_ver_max = sorted_kernel_info[-1]["version"] + if kernel_ver_max != kernel_ver_current: + msg = textwrap.dedent( + """ + Current kernel version does not match the latest built DKMS module. 
+ Your running kernel: {kernel_ver_current} + Latest DKMS module built on kernel: {kernel_ver_max} + Maybe the target DKMS was not built, + or you are not running the latest available kernel. + """.format( + kernel_ver_current=kernel_ver_current, + kernel_ver_max=kernel_ver_max, + ) + ) + logger.error(msg) + logger.error("=== DKMS status ===\n{0}".format(dkms_status)) + return 1 + return 0 + + +def check_dkms_module_count(sorted_kernel_info: List[Dict], dkms_status: str): + kernel_ver_max = sorted_kernel_info[-1]["version"] + kernel_ver_min = sorted_kernel_info[0]["version"] + + version_count = Counter([item["version"] for item in sorted_kernel_info]) + number_dkms_min = version_count[kernel_ver_min] + number_dkms_max = version_count[kernel_ver_max] + number_dkms_min = version_count[kernel_ver_min] + number_dkms_max = version_count[kernel_ver_max] + + if number_dkms_min != number_dkms_max: + msg = textwrap.dedent( + """ + {number_dkms_min} modules for {kernel_ver_min} + {number_dkms_max} modules for {kernel_ver_max} + DKMS module number is inconsistent. Some modules may not be built. 
+ """.format( + number_dkms_min=number_dkms_min, + kernel_ver_min=kernel_ver_min, + number_dkms_max=number_dkms_max, + kernel_ver_max=kernel_ver_max, + ) + ) + logger.warning(msg) + logger.warning("=== DKMS status ===\n{0}".format(dkms_status)) + return 1 + return 0 + + +def get_context_lines(log: List[str], line_numbers: Set[int]) -> List[str]: + # Create a set with the indexes of the lines to be printed + context_lines = set() + context = 5 + n_lines = len(log) + for i in line_numbers: + min_numbers = max(0, i - context) + max_numbers = min(n_lines, i + context + 1) + for j in range(min_numbers, max_numbers): + context_lines.add(j) + return [log[i] for i in sorted(context_lines)] + + +def has_dkms_build_errors(kernel_ver_current: str) -> int: + log_path = "/var/log/apt/term.log" + err_msg = "Bad return status for module build on kernel: {}".format( + kernel_ver_current + ) + with open(log_path, "r") as f: + log = f.readlines() + err_line_numbers = {i for i, line in enumerate(log) if err_msg in line} + if err_line_numbers: + logger.error( + "Found dkms build error messages in {}".format(log_path) + ) + logger.error("\n=== build log ===") + err_with_context = get_context_lines(log, err_line_numbers) + logger.error("".join(err_with_context)) + return 1 + return 0 + + +def main(): + # Get the kernel version and DKMS status + ubuntu_release = run_command(["lsb_release", "-r"]).split()[-1] + dkms_status = run_command(["dkms", "status"]) + + # Parse and sort the DKMS status and sort the kernel versions + sorted_kernel_info = parse_dkms_status(dkms_status, ubuntu_release) + + # kernel_ver_max should be the same as kernel_ver_current + kernel_ver_current = run_command(["uname", "-r"]) + if check_kernel_version( + kernel_ver_current, sorted_kernel_info, dkms_status + ): + return 1 + + # Count the occurernces of the latest and the oldest kernel version and + # compare the number of DKMS modules for min and max kernel versions + check_dkms_module_count(sorted_kernel_info, 
dkms_status) + + # Scan the APT log for errors during system update + return has_dkms_build_errors(kernel_ver_current) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/providers/sru/tests/test_dkms_build_validation.py b/providers/sru/tests/test_dkms_build_validation.py new file mode 100644 index 0000000000..ecb5a20a14 --- /dev/null +++ b/providers/sru/tests/test_dkms_build_validation.py @@ -0,0 +1,218 @@ +from packaging.version import Version +import unittest +from unittest.mock import patch, mock_open +import subprocess + +from dkms_build_validation import ( + run_command, + parse_dkms_status, + parse_version, + check_kernel_version, + check_dkms_module_count, + get_context_lines, + has_dkms_build_errors, + main, +) + + +class TestDKMSValidation(unittest.TestCase): + + # Example output of `dkms status` + dkms_status = ( + "fwts/24.01.00, 6.5.0-17-generic, x86_64: installed\n" + "fwts/24.01.00, 6.5.0-15-generic, x86_64: installed" + ) + + sorted_kernel_info = [ + {"version": "6.5.0-15-generic", "status": "installed"}, + {"version": "6.5.0-17-generic", "status": "installed"}, + ] + + @patch("dkms_build_validation.subprocess.check_output") + def test_run_command(self, mock_check_output): + mock_check_output.return_value = "output" + result = run_command(["lsb_release", "-r"]) + self.assertEqual(result, "output") + mock_check_output.assert_called_once_with( + ["lsb_release", "-r"], + stderr=subprocess.STDOUT, + universal_newlines=True, + ) + + @patch("subprocess.check_output") + def test_run_command_exception(self, mock_check_output): + # Simulate a CalledProcessError exception + mock_check_output.side_effect = subprocess.CalledProcessError( + 1, ["test_command"] + ) + + # run_command will raise an exception + with self.assertRaises(SystemExit): + run_command(["test_command"]) + + def test_parse_dkms_status(self): + ubuntu_release = "22.04" + kernel_info = parse_dkms_status(self.dkms_status, ubuntu_release) + # Assuming you have a specific expected 
output for kernel_info + expected_kernel_info = [ + {"version": "6.5.0-15-generic", "status": "installed"}, + {"version": "6.5.0-17-generic", "status": "installed"}, + ] + self.assertEqual(kernel_info, expected_kernel_info) + + def test_parse_dkms_status_old(self): + old_dkms_status = ( + "fwts, 24.01.00, 6.5.0-17-generic, x86_64: installed\n" + "fwts, 24.01.00, 6.5.0-15-generic, x86_64: installed" + ) + ubuntu_release = "18.04" + sorted_kernel_info = parse_dkms_status(old_dkms_status, ubuntu_release) + # Assuming you have a specific expected output for kernel_info + expected_kernel_info = [ + {"version": "6.5.0-15-generic", "status": "installed"}, + {"version": "6.5.0-17-generic", "status": "installed"}, + ] + self.assertEqual(sorted_kernel_info, expected_kernel_info) + + # Test the old format with a newer Ubuntu release + ubuntu_release = "22.04" + sorted_kernel_info = parse_dkms_status(old_dkms_status, ubuntu_release) + self.assertNotEqual(sorted_kernel_info, expected_kernel_info) + + def test_parse_version(self): + # Test with a valid version string + self.assertEqual( + parse_version("6.5.0-18-generic"), Version("6.5.0.post18") + ) + # Test with a shorter valid version string + self.assertEqual(parse_version("6.5.0"), Version("6.5.0")) + + # Test with an different version string + self.assertNotEqual( + parse_version("6.5.0-20-generic"), Version("6.5.0.post18") + ) + + # Test with an invalid version string + with self.assertRaises(SystemExit): + parse_version("Wrong version string") + + def test_check_kernel_version(self): + # Test with a kernel version that matches the latest one + self.assertEqual( + check_kernel_version( + "6.5.0-17-generic", self.sorted_kernel_info, self.dkms_status + ), + 0, + ) + + # Test with a kernel version that doesn't match the latest one + self.assertEqual( + check_kernel_version( + "6.5.0-18-generic", self.sorted_kernel_info, self.dkms_status + ), + 1, + ) + + def test_check_dkms_module_count(self): + # Test with the same number 
of modules + self.assertEqual( + check_dkms_module_count(self.sorted_kernel_info, self.dkms_status), + 0, + ) + + # Test with a different number of modules + bad_kernel_info = self.sorted_kernel_info + [ + {"version": "6.5.0-17-generic", "status": "installed"} + ] + self.assertEqual( + check_dkms_module_count(bad_kernel_info, self.dkms_status), + 1, + ) + + def test_get_context_lines_center(self): + log = ["L{}".format(i) for i in range(0, 20)] + line_idx = {10, 11} + expected_output = ["L{}".format(i) for i in range(5, 17)] + self.assertEqual(get_context_lines(log, line_idx), expected_output) + + def test_get_context_lines_edges(self): + log = ["L{}".format(i) for i in range(0, 20)] + line_idx = {0, 18} + expected_output = [ + "L0", + "L1", + "L2", + "L3", + "L4", + "L5", + "L13", + "L14", + "L15", + "L16", + "L17", + "L18", + "L19", + ] + self.assertEqual(get_context_lines(log, line_idx), expected_output) + + def test_has_dkms_build_errors(self): + kernel_ver_current = "6.5.0-17-generic" + + # Test with a log file that doesn't contain any errors + data = "Some log message\nSome log message\nSome log message\n" + with patch("builtins.open", mock_open(read_data=data)): + self.assertEqual(has_dkms_build_errors(kernel_ver_current), False) + + # Test with a log file that contains errors + data = ( + "Some log message\n" + "Bad return status for module build on kernel: 6.5.0-17-generic\n" + "Some log message\n" + ) + with patch("builtins.open", mock_open(read_data=data)): + self.assertEqual(has_dkms_build_errors(kernel_ver_current), True) + + @patch("dkms_build_validation.run_command") + @patch("dkms_build_validation.parse_dkms_status") + @patch("dkms_build_validation.check_kernel_version") + @patch("dkms_build_validation.check_dkms_module_count") + @patch("dkms_build_validation.has_dkms_build_errors") + def test_main( + self, mock_err, mock_count, mock_ver, mock_parse, mock_run_command + ): + mock_run_command.return_value = "output" + mock_parse.return_value = [] + 
mock_ver.return_value = 0 + mock_count.return_value = 0 + mock_err.return_value = 0 + self.assertEqual(main(), 0) + + @patch("dkms_build_validation.run_command") + @patch("dkms_build_validation.parse_dkms_status") + @patch("dkms_build_validation.check_kernel_version") + @patch("dkms_build_validation.check_dkms_module_count") + @patch("dkms_build_validation.has_dkms_build_errors") + def test_main_different_kernel_version( + self, mock_err, mock_count, mock_ver, mock_parse, mock_run_command + ): + mock_run_command.return_value = "output" + mock_parse.return_value = [] + mock_ver.return_value = 1 + mock_count.return_value = 0 + mock_err.return_value = 0 + self.assertEqual(main(), 1) + + @patch("dkms_build_validation.run_command") + @patch("dkms_build_validation.parse_dkms_status") + @patch("dkms_build_validation.check_kernel_version") + @patch("dkms_build_validation.check_dkms_module_count") + @patch("dkms_build_validation.has_dkms_build_errors") + def test_main_with_dkms_build_errors( + self, mock_err, mock_count, mock_ver, mock_parse, mock_run_command + ): + mock_run_command.return_value = "output" + mock_parse.return_value = [] + mock_ver.return_value = 0 + mock_count.return_value = 0 + mock_err.return_value = 1 + self.assertEqual(main(), 1) diff --git a/providers/sru/units/sru.pxu b/providers/sru/units/sru.pxu index 6b2bc35b73..85c666ca61 100644 --- a/providers/sru/units/sru.pxu +++ b/providers/sru/units/sru.pxu @@ -7,7 +7,7 @@ category_id: com.canonical.plainbox::miscellanea id: miscellanea/dkms_build_validation requires: package.name == 'dkms' command: - dkms_build_validation + dkms_build_validation.py _summary: Validate the build status of DKMS modules, automatically _description: Firstly, check the built number of DKMS modules. 
From feb99f55c6e59784684a7eb099e13e52760ca3ad Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Mon, 11 Mar 2024 15:03:33 +0100 Subject: [PATCH 070/108] Documentation for job statuses (infra) (#1050) * Init documentation for job statuses * Learning to spell * Better section titles Co-authored-by: tang-mm <3333407+tang-mm@users.noreply.github.com> * Better structure/clearer grammar Co-authored-by: tang-mm <3333407+tang-mm@users.noreply.github.com> * Correct Failing status reason Minor: newlines at 79c --------- Co-authored-by: tang-mm <3333407+tang-mm@users.noreply.github.com> --- docs/reference/index.rst | 1 + docs/reference/job-status.rst | 82 +++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+) create mode 100644 docs/reference/job-status.rst diff --git a/docs/reference/index.rst b/docs/reference/index.rst index 4ee37a766a..05d158b0b9 100644 --- a/docs/reference/index.rst +++ b/docs/reference/index.rst @@ -11,3 +11,4 @@ Reference units/index snaps submission-schema + job-status diff --git a/docs/reference/job-status.rst b/docs/reference/job-status.rst new file mode 100644 index 0000000000..f73ce91985 --- /dev/null +++ b/docs/reference/job-status.rst @@ -0,0 +1,82 @@ +.. _job_status: + +Checkbox Job Status +=================== + +A Checkbox job always receives a final status either automatically or +manually. The following is a description of what each status means and when/how +it is received. + +Passed Jobs +------------ + +The Passing outcome is the marker for a successful run. It can be assigned in +the following situations: + +- Automated job returned a 0 return code +- Automated job is marked as ``noreturn`` and the session was interrupted and + brought back +- Manual job marked as passed by the user +- Job explicitly marked as passed by the user when a session was manually + brought back after interruption + +Every passed job is marked by either the following symbol ``☑`` (ballot box +with check) or the text ``job passed``. 
Checkbox internally uses the +``IJobResult.OUTCOME_PASS`` object to mark these jobs. + +Skipped Jobs +------------ + +The Skipped outcome is the marker for a job that was intentionally not started +either by the user or Checkbox itself. This can be due to the following +reasons: + +- Job with a ``require`` constraint that can not be satisfied +- Job with a dependency on a job that is skipped itself +- Job explicitly skipped by the user via the ``launcher`` +- Job explicitly skipped by the user via the Ctrl+C menu +- Job explicitly skipped by the user via the resume screen + +Every skipped job is either marked by the following symbol ``" "`` (white +space) or the text ``job skipped``. Checkbox internally uses the +``IJobResult.OUTCOME_SKIP`` to mark these jobs. + +Failed Jobs +------------ + +The Failing outcome is the marker for a failing job run. It can be assigned in +the following situations: + +- Automated job returned a non-0 return code +- Manual job marked as failed by the user +- Job explicitly marked as failed by the user when a session was manually + brought back after interruption + +Every failed job is marked by either the following symbol ``☒`` +(ballot box with X) or the text ``job failed``. Checkbox internally +uses the ``IJobResult.OUTCOME_FAIL`` object to mark these jobs. + +Crashed Jobs +------------- + +The Crashing outcome is the marker for a crashing job. It can only be assigned +to automated job in the following situations: + +- Job crashed or was forcibly terminated by an external actor (like the Out of + Memory Guardian) +- Job interrupted the testing session without a ``noreturn`` flag + +Every crashed job is marked by either the warning marker ``⚠`` (warning sign) +or the text ``job crashed``. Checkbox internally uses the +``IJobResult.OUTCOME_CRASH`` object to mark these jobs. + +Not Started Jobs +---------------- + +The Not Started outcome is the marker for a job that can not be started. 
It is +assigned only in the situation where a job depends on another job that was +either skipped or not started itself. + +Every not-started job is marked either by the following marker ``☐`` (ballot +box) or the text ``job cannot be started``. Checkbox internally uses the +``IJobResult.OUTCOME_NOT_SUPPORTED`` object to mark these jobs. From cd17d9a51b6565b8736f5fddf3a09102197c3efd Mon Sep 17 00:00:00 2001 From: kissiel Date: Tue, 12 Mar 2024 09:23:06 +0100 Subject: [PATCH 071/108] Remove snappy-revert sub-tp from sru (BugFix) (#1043) remove snappy-revert sub-tp from sru --- providers/sru/units/sru.pxu | 1 - 1 file changed, 1 deletion(-) diff --git a/providers/sru/units/sru.pxu b/providers/sru/units/sru.pxu index 85c666ca61..6cdae56aaa 100644 --- a/providers/sru/units/sru.pxu +++ b/providers/sru/units/sru.pxu @@ -135,7 +135,6 @@ nested_part: # Now we ask to switch to the integrated graphics card. after-suspend-graphics-integrated-gpu-cert-automated # after-suspend-monitor-integrated-gpu-cert-automated # not defined - com.canonical.certification::snap-refresh-revert bootstrap_include: device graphics_card From 5a56038464b0819890b7288b406dbb52af2c26e5 Mon Sep 17 00:00:00 2001 From: Pierre Equoy Date: Tue, 12 Mar 2024 16:44:57 +0800 Subject: [PATCH 072/108] Add TemplateUnit.__repr__ and modify its __str__ method (Bugfix) (#1049) Add TemplateUnit.__repr__ and modify its __str__ method Update TemplateUnit.__str__ to use the new template_id field for more accuracy. Add proper representation, similar to JobDefinition objects. 
--- checkbox-ng/plainbox/impl/unit/template.py | 6 +++++- .../plainbox/impl/unit/test_template.py | 19 +++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/checkbox-ng/plainbox/impl/unit/template.py b/checkbox-ng/plainbox/impl/unit/template.py index ac65fa6ed4..126353264a 100644 --- a/checkbox-ng/plainbox/impl/unit/template.py +++ b/checkbox-ng/plainbox/impl/unit/template.py @@ -187,7 +187,11 @@ def instantiate_template(cls, data, raw_data, origin, provider, parameters, def __str__(self): """String representation of Template unit objects.""" - return "{} <~ {}".format(self.id, self.resource_id) + return "{} <~ {}".format(self.template_id, self.resource_id) + + def __repr__(self): + return "".format( + self.template_id) @property def unit(self): diff --git a/checkbox-ng/plainbox/impl/unit/test_template.py b/checkbox-ng/plainbox/impl/unit/test_template.py index 08d3dcceff..98b3587071 100644 --- a/checkbox-ng/plainbox/impl/unit/test_template.py +++ b/checkbox-ng/plainbox/impl/unit/test_template.py @@ -42,6 +42,25 @@ class TemplateUnitTests(TestCase): + def test_str(self): + template = TemplateUnit({ + "template-resource": "resource", + "template-id": "check-devices", + "id": "check-device-{dev_name}", + }) + self.assertEqual(str(template), "check-devices <~ resource") + + def test_repr(self): + template = TemplateUnit({ + "template-resource": "resource", + "template-id": "check-devices", + "id": "check-device-{dev_name}", + }) + self.assertEqual( + repr(template), + "" + ) + def test_id(self): template = TemplateUnit({ "template-resource": "resource", From 94459f99326c42f92f25ec3929ec4956213bb342 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Tue, 12 Mar 2024 10:01:32 +0100 Subject: [PATCH 073/108] Clarified text and updated glyph (Infra) (#1051) Clarified text and updated glyph --- docs/reference/job-status.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/reference/job-status.rst 
b/docs/reference/job-status.rst index f73ce91985..df33b40a0c 100644 --- a/docs/reference/job-status.rst +++ b/docs/reference/job-status.rst @@ -33,12 +33,13 @@ reasons: - Job with a ``require`` constraint that can not be satisfied - Job with a dependency on a job that is skipped itself -- Job explicitly skipped by the user via the ``launcher`` +- Job is ``manual``, ``user-interact`` or ``user-interact-verify`` but the + session is ``silent`` - Job explicitly skipped by the user via the Ctrl+C menu - Job explicitly skipped by the user via the resume screen -Every skipped job is either marked by the following symbol ``" "`` (white -space) or the text ``job skipped``. Checkbox internally uses the +Every skipped job is either marked by the following symbol ``☐`` (ballot +box) or the text ``job skipped``. Checkbox internally uses the ``IJobResult.OUTCOME_SKIP`` to mark these jobs. Failed Jobs From 708dfdc8f40961273d02c95d606607237831009b Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Tue, 12 Mar 2024 10:17:09 +0100 Subject: [PATCH 074/108] Pass github token to the can_promote_edge script (infra) (#1053) Pass github token to the can_promote_edge script --- .github/workflows/checkbox-beta-release.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/checkbox-beta-release.yml b/.github/workflows/checkbox-beta-release.yml index ff570d8e39..538c79e839 100644 --- a/.github/workflows/checkbox-beta-release.yml +++ b/.github/workflows/checkbox-beta-release.yml @@ -26,6 +26,8 @@ jobs: with: fetch-depth: 0 - name: Verify Promotion Conditions + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | tools/release/can_promote_edge.py From 4b7b77671c0ae879f37c539c15ba54225d62e36c Mon Sep 17 00:00:00 2001 From: Pierre Equoy Date: Tue, 12 Mar 2024 17:27:56 +0800 Subject: [PATCH 075/108] Apply overrides even when using template id in test plan definition (BugFix) (#1052) * Apply overrides even when using template id in test plan definition If an override is 
described next of a template id in a test plan definition, this override is now applied to every jobs instantiated from this template. Fix #1046 * Refactor SessionDeviceContext._bulk_override_update * Add unit tests for _override_update and _bulk_override_update * Remove vendorized mock and refactor test_state.py * Fix test to run with Python 3.5 --- checkbox-ng/plainbox/impl/session/state.py | 16 +++--- .../plainbox/impl/session/test_state.py | 57 +++++++++++++++---- 2 files changed, 53 insertions(+), 20 deletions(-) diff --git a/checkbox-ng/plainbox/impl/session/state.py b/checkbox-ng/plainbox/impl/session/state.py index 46eb54fbc8..658772d84b 100644 --- a/checkbox-ng/plainbox/impl/session/state.py +++ b/checkbox-ng/plainbox/impl/session/state.py @@ -548,20 +548,20 @@ def _invalidate_override_map(self, *args, **kwargs): self.invalidate_shared(self._CACHE_OVERRIDE_MAP) def _bulk_override_update(self): - # NOTE: there is an O(N) algorithm for that solves this but it is more - # complicated than I was able to write without a hard-copy reference - # that describes it. I will improve this method once I complete the - # required research. for job_state in self.state.job_state_map.values(): job = job_state.job - for pattern, override_list in self.override_map.items(): - if re.match(pattern, job.id): - job_state.apply_overrides(override_list) + self._override_update(job) def _override_update(self, job): + """ + Apply overrides to job if they are directly related or apply to the + template the job was instantiated from. 
+ """ job_state = self.state.job_state_map[job.id] for pattern, override_list in self.override_map.items(): - if re.match(pattern, job.id): + if re.match(pattern, job.id) or ( + job.template_id and re.match(pattern, job.template_id) + ): job_state.apply_overrides(override_list) def _update_mandatory_job_list(self): diff --git a/checkbox-ng/plainbox/impl/session/test_state.py b/checkbox-ng/plainbox/impl/session/test_state.py index 7f1dec63fe..702fb822c9 100644 --- a/checkbox-ng/plainbox/impl/session/test_state.py +++ b/checkbox-ng/plainbox/impl/session/test_state.py @@ -24,6 +24,9 @@ from doctest import DocTestSuite from doctest import REPORT_NDIFF from unittest import TestCase +from unittest.mock import MagicMock +from unittest.mock import Mock +from unittest.mock import patch from plainbox.abc import IJobResult from plainbox.impl.depmgr import DependencyDuplicateError @@ -38,6 +41,7 @@ from plainbox.impl.session import InhibitionCause from plainbox.impl.session import SessionState from plainbox.impl.session import UndesiredJobReadinessInhibitor +from plainbox.impl.session.state import JobState from plainbox.impl.session.state import SessionDeviceContext from plainbox.impl.session.state import SessionMetaData from plainbox.impl.testing_utils import make_job @@ -45,7 +49,6 @@ from plainbox.impl.unit.category import CategoryUnit from plainbox.impl.unit.unit_with_id import UnitWithId from plainbox.suspend_consts import Suspend -from plainbox.vendor import mock from plainbox.vendor.morris import SignalTestCase @@ -464,9 +467,9 @@ def test_mandatory_jobs_are_first_in_run_list(self): def test_system_information_collection_called(self): getter = SessionState.system_information.__get__ - self_mock = mock.MagicMock() + self_mock = MagicMock() self_mock._system_information = None - with mock.patch( + with patch( "plainbox.impl.session.state.collect_system_information" ) as collect_system_information_mock: return_value = getter(self_mock) @@ -479,15 +482,15 @@ def 
test_system_information_collection_cached(self): getter = SessionState.system_information.__get__ setter = SessionState.system_information.__set__ - self_mock = mock.MagicMock() + self_mock = MagicMock() self_mock._system_information = None - with mock.patch( + with patch( "plainbox.impl.session.state.collect_system_information" ) as collect_system_information_mock: setter(self_mock, {"inxi": {}}) self.assertFalse(collect_system_information_mock.called) - with mock.patch( + with patch( "plainbox.impl.session.state.collect_system_information" ) as collect_system_information_mock: return_value = getter(self_mock) @@ -503,7 +506,7 @@ class SessionStateTrimTests(TestCase): def setUp(self): self.job_a = make_job("a") self.job_b = make_job("b") - self.origin = mock.Mock(name="origin", spec_set=Origin) + self.origin = Mock(name="origin", spec_set=Origin) self.session = SessionState([self.job_a, self.job_b]) def test_trim_does_remove_jobs(self): @@ -734,7 +737,7 @@ def test_normal_job_result_updates(self): InhibitionCause.UNDESIRED, ) - @mock.patch("plainbox.impl.ctrl.logger") + @patch("plainbox.impl.ctrl.logger") def test_resource_job_with_broken_output(self, mock_logger): # This function checks how SessionState parses partially broken # resource jobs. A JobResult with broken output is constructed below. 
@@ -995,13 +998,13 @@ def test_app_id_kwarg_to_init(self): class SessionDeviceContextTests(SignalTestCase): def setUp(self): self.ctx = SessionDeviceContext() - self.provider = mock.Mock(name="provider", spec_set=Provider1) - self.unit = mock.Mock(name="unit", spec_set=UnitWithId) + self.provider = Mock(name="provider", spec_set=Provider1) + self.unit = Mock(name="unit", spec_set=UnitWithId) self.unit.provider = self.provider self.provider.unit_list = [self.unit] self.provider.problem_list = [] - self.job = mock.Mock(name="job", spec_set=JobDefinition, siblings=None) - self.job.get_flag_set = mock.Mock(return_value=()) + self.job = Mock(name="job", spec_set=JobDefinition, siblings=None) + self.job.get_flag_set = Mock(return_value=()) self.job.Meta.name = "job" def test_smoke(self): @@ -1218,3 +1221,33 @@ def test_on_job_removed__via_state(self): sig2 = self.assertSignalFired(self.ctx.state.on_unit_removed, self.job) sig3 = self.assertSignalFired(self.ctx.state.on_job_removed, self.job) self.assertSignalOrdering(sig1, sig2, sig3) + + def test_override_update(self): + """ + Check that JobState.apply_overrides is called if the override matches a + job id or the template_id this job has been instantiated from. 
+ """ + self_mock = MagicMock() + self_mock.override_map = { + "^test-tpl$": [("certification_status", "blocker")], + "^job1$": [("certification_status", "blocker")], + } + job1 = Mock(id="job1", template_id=None) + tpl_job = Mock(id="job2", template_id="test-tpl") + SessionDeviceContext._override_update(self_mock, job1) + self.assertTrue( + self_mock.state.job_state_map[job1.id].apply_overrides.called + ) + SessionDeviceContext._override_update(self_mock, tpl_job) + self.assertTrue( + self_mock.state.job_state_map[tpl_job.id].apply_overrides.called + ) + + def test_bulk_override_update(self): + self_mock = MagicMock() + job_state1 = Mock(spec=JobState) + self_mock.state.job_state_map = { + "job1": job_state1, + } + SessionDeviceContext._bulk_override_update(self_mock) + self.assertTrue(self_mock._override_update.called) From 48cd03ac4eb142c830db5bda4e15992fd93e9f73 Mon Sep 17 00:00:00 2001 From: kissiel Date: Tue, 12 Mar 2024 17:24:24 +0100 Subject: [PATCH 076/108] Don't diff results of the network test (BugFix) (#1057) don't diff results of the network test --- providers/base/units/suspend/suspend.pxu | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/providers/base/units/suspend/suspend.pxu b/providers/base/units/suspend/suspend.pxu index 4ae1bfa509..677694a147 100644 --- a/providers/base/units/suspend/suspend.pxu +++ b/providers/base/units/suspend/suspend.pxu @@ -401,7 +401,7 @@ id: suspend/network_after_suspend estimated_duration: 20.0 depends: suspend/suspend_advanced_auto suspend/network_before_suspend _description: Test the network after resuming. -command: network_wait.sh; gateway_ping_test.py | diff "$PLAINBOX_SESSION_SHARE"/network_before_suspend.txt - +command: network_wait.sh; gateway_ping_test.py user: root plugin: shell @@ -410,7 +410,7 @@ id: suspend/network_after_suspend_auto estimated_duration: 20.0 depends: suspend/suspend_advanced_auto suspend/network_before_suspend _description: Test the network after resuming. 
-command: network_wait.sh; gateway_ping_test.py | diff "$PLAINBOX_SESSION_SHARE"/network_before_suspend.txt - +command: network_wait.sh; gateway_ping_test.py user: root plugin: shell From 2202949f84441da479c6aa81112b24d1cb26fef1 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Wed, 13 Mar 2024 11:55:00 +0100 Subject: [PATCH 077/108] Increase step timeout to 10h for snaps and 13h for debs build (infra) (#1059) * Increase step timeout to 10h and 13h for debs Minor: move work to small workers as we don't need large here * Force top level timeout * Move timeout inside job --- .github/workflows/checkbox-core-snap-daily-builds.yml | 5 ++++- .github/workflows/checkbox-snap-daily-builds.yml | 5 ++++- .github/workflows/daily-builds.yml | 2 +- .github/workflows/deb-daily-builds.yml | 4 +++- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.github/workflows/checkbox-core-snap-daily-builds.yml b/.github/workflows/checkbox-core-snap-daily-builds.yml index cc81fc432c..92c235ece7 100644 --- a/.github/workflows/checkbox-core-snap-daily-builds.yml +++ b/.github/workflows/checkbox-core-snap-daily-builds.yml @@ -10,7 +10,8 @@ jobs: matrix: releases: [16, 18, 20, 22] arch: [amd64, arm64, armhf] - runs-on: [self-hosted, linux, large] + runs-on: [self-hosted, linux, small] + timeout-minutes: 1200 #20h, this will timeout sooner due to inner timeouts env: SERIES: series${{ matrix.releases }} SNAPCRAFT_STORE_CREDENTIALS: ${{ secrets.SNAPCRAFT7_CREDS }} @@ -38,6 +39,7 @@ jobs: echo "Building at: https://git.launchpad.net/~ce-certification-qa/+snap/$SNAPCRAFT_BUILDER_ID" - uses: Wandalen/wretry.action@a163f62ae554a8f3cbe27b23db15b60c0ae2e93c # v1.3.0 name: Build the snap + timeout-minutes: 600 # 10hours with: action: Hook25/action-build@707dce252c4f367b6c1afe61ed577f7413cf7912 attempt_delay: 600000 # 10min @@ -62,6 +64,7 @@ jobs: path: checkbox-core-snap/series${{ matrix.releases }}/*.snap - uses: Wandalen/wretry.action@a163f62ae554a8f3cbe27b23db15b60c0ae2e93c # v1.3.0 name: 
Upload the snap to the store + timeout-minutes: 600 # 10hours with: attempt_delay: 600000 # 10min attempt_limit: 10 diff --git a/.github/workflows/checkbox-snap-daily-builds.yml b/.github/workflows/checkbox-snap-daily-builds.yml index 92ed1a9a05..fbe0220e31 100644 --- a/.github/workflows/checkbox-snap-daily-builds.yml +++ b/.github/workflows/checkbox-snap-daily-builds.yml @@ -11,7 +11,8 @@ jobs: matrix: type: [classic, uc] releases: [16, 18, 20, 22] - runs-on: [self-hosted, linux, large] + runs-on: [self-hosted, linux, small] + timeout-minutes: 1200 #20h, this will timeout sooner due to inner timeouts env: SERIES: series_${{ matrix.type }}${{ matrix.releases }} SNAPCRAFT_STORE_CREDENTIALS: ${{ secrets.SNAPCRAFT7_CREDS }} @@ -40,6 +41,7 @@ jobs: echo "Building at: https://git.launchpad.net/~ce-certification-qa/+snap/$SNAPCRAFT_BUILDER_ID" - uses: Wandalen/wretry.action@a163f62ae554a8f3cbe27b23db15b60c0ae2e93c # v1.3.0 name: Building the snaps + timeout-minutes: 600 # 10hours with: action: Hook25/action-build@707dce252c4f367b6c1afe61ed577f7413cf7912 attempt_delay: 600000 # 10min @@ -64,6 +66,7 @@ jobs: path: checkbox-snap/series_${{ matrix.type }}${{ matrix.releases }}/*.snap - uses: Wandalen/wretry.action@a163f62ae554a8f3cbe27b23db15b60c0ae2e93c # v1.3.0 name: Upload the snaps to the store + timeout-minutes: 600 # 10hours with: attempt_delay: 600000 # 10min attempt_limit: 10 diff --git a/.github/workflows/daily-builds.yml b/.github/workflows/daily-builds.yml index f754f2ed09..03cfac93cf 100644 --- a/.github/workflows/daily-builds.yml +++ b/.github/workflows/daily-builds.yml @@ -7,7 +7,7 @@ on: jobs: check_for_commits: - runs-on: [self-hosted, linux, large] + runs-on: [self-hosted, linux, small] name: Check for commits outputs: new_commit_count: ${{ steps.commit_check.outputs.new_commit_count }} diff --git a/.github/workflows/deb-daily-builds.yml b/.github/workflows/deb-daily-builds.yml index 780f28dec6..854eef0a48 100644 --- a/.github/workflows/deb-daily-builds.yml 
+++ b/.github/workflows/deb-daily-builds.yml @@ -7,7 +7,8 @@ on: jobs: ppa_update: name: Sync PPA history with monorepo - runs-on: [self-hosted, linux, large] + runs-on: [self-hosted, linux, small] + timeout-minutes: 1200 #20h, this will timeout sooner due to inner timeouts steps: - name: Install dependencies run: | @@ -65,6 +66,7 @@ jobs: tools/release/lp_update_recipe.py checkbox --recipe ${{ matrix.recipe }} --new-version $(tools/release/get_version.py --dev-suffix --output-format deb) --revision $GITHUB_SHA - uses: Wandalen/wretry.action@a163f62ae554a8f3cbe27b23db15b60c0ae2e93c # v1.3.0 name: Build and wait result + timeout-minutes: 780 # 13hours env: LP_CREDENTIALS: ${{ secrets.LP_CREDS }} PYTHONUNBUFFERED: 1 From c192174234287a4976b8f53ccf231ed78c6d2a51 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Wed, 13 Mar 2024 16:27:08 +0100 Subject: [PATCH 078/108] Replace `create` with `up --no-start` in docker tests (bugfix) (#1061) replace create with up --no-start Here --no-start was used because removing --no-start and the subsequent start seems to hang the test --- providers/docker/units/docker.pxu | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/providers/docker/units/docker.pxu b/providers/docker/units/docker.pxu index a0104e55c0..2c8f3e691c 100644 --- a/providers/docker/units/docker.pxu +++ b/providers/docker/units/docker.pxu @@ -285,7 +285,7 @@ command: EOF ) echo "$yml" | {compose_command} -f - pull - echo "$yml" | {compose_command} -f - create + echo "$yml" | {compose_command} -f - up --no-start echo "$yml" | {compose_command} -f - start ID=$(echo "$yml" | {compose_command} -f - ps -q test) [ `docker inspect -f {status} $ID` != running ] && exit 1 @@ -446,11 +446,11 @@ command: set -ex compose_file={root_dir}/docker-compose-california-0.6.1.yml wget https://raw.githubusercontent.com/edgexfoundry/developer-scripts/04c933f2c03bb9b212e1035505fed0a386f4d43e/compose-files/docker-compose-california-0.6.1.yml -O $compose_file - for svc in 
volume config-seed mongo logging notifications metadata data command scheduler export-client export-distro rulesengine device-virtual; do + for svc in volume config-seed mongo logging notifications metadata data command scheduler export-client export-distro rulesengine device-virtual; do {compose_command} --file $compose_file up -d $svc sleep 60 done - for svc in volume mongo logging notifications metadata data command scheduler export-client export-distro device-virtual; do + for svc in volume mongo logging notifications metadata data command scheduler export-client export-distro device-virtual; do status=$(docker inspect -f '{status}' $({compose_command} --file $compose_file ps -q $svc)) if [ "$status" != "running" ]; then echo "service $svc is supposed to be running, but currently has status: $status" From 539ec189bfa33d88bed26b278ba8da5588c888b0 Mon Sep 17 00:00:00 2001 From: kissiel Date: Wed, 13 Mar 2024 18:09:33 +0100 Subject: [PATCH 079/108] fix the name of the Alder Lake codename (BugFix) (#1062) * fix the name of the Alder Lake codename * preserve the column alignment --- providers/base/bin/cpuid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/providers/base/bin/cpuid.py b/providers/base/bin/cpuid.py index cf33ad361e..64340b040b 100755 --- a/providers/base/bin/cpuid.py +++ b/providers/base/bin/cpuid.py @@ -201,7 +201,7 @@ def cpuid_to_human_friendly(cpuid: str) -> str: "Sapphire Rapids": ['0x806f3', '0x806f6', '0x806f7', '0x806f8'], "Skylake": ['0x406e3', '0x506e3', '0x50654', '0x50652'], "Tiger Lake": ['0x806c1'], - "Aderlake": ['0x906a4', '0x906A3', '0x90675', '0x90672'], + "Alder Lake": ['0x906a4', '0x906A3', '0x90675', '0x90672'], "Westmere": ['0x2065', '0x206c', '0x206f'], "Whisky Lake": ['0x806eb', '0x806ec'], } From 71f0d20e8f2d0bb380de236a519965909cc0f356 Mon Sep 17 00:00:00 2001 From: Fernando Bravo <39527354+fernando79513@users.noreply.github.com> Date: Wed, 13 Mar 2024 18:50:47 +0100 Subject: [PATCH 080/108] Cover smaller 
genio tests (New) (#1060) * Formatted dvfs test and removed typos * Added test for dvfs_gpu_check_governors * Raise SystemExit only on fail * Added serialcheck tests * Fixed Error raise when stdout is empty * Added spidev test * Added pmic regulator tests * Fixed errors in linux_ccf - Moved check_env_variables to function so the module can be imported without requiring them - Added choices for devices considered in the test - Test failing with wrong verify output * Added linux ccf tests * Update contrib/genio/bin/spidev_test.py Co-authored-by: kissiel --------- Co-authored-by: kissiel --- contrib/genio/bin/dvfs_gpu_check_governors.py | 28 ++-- contrib/genio/bin/linux_ccf.py | 91 ++++++----- contrib/genio/bin/serialcheck.py | 7 +- contrib/genio/bin/spidev_test.py | 5 +- .../tests/test_dvfs_gpu_check_governors.py | 55 +++++++ contrib/genio/tests/test_linux_ccf.py | 141 ++++++++++++++++++ contrib/genio/tests/test_pmic_regulator.py | 66 ++++++++ contrib/genio/tests/test_serialcheck.py | 117 +++++++++++++++ contrib/genio/tests/test_spidev.py | 131 ++++++++++++++++ 9 files changed, 584 insertions(+), 57 deletions(-) create mode 100644 contrib/genio/tests/test_dvfs_gpu_check_governors.py create mode 100644 contrib/genio/tests/test_linux_ccf.py create mode 100644 contrib/genio/tests/test_pmic_regulator.py create mode 100644 contrib/genio/tests/test_serialcheck.py create mode 100644 contrib/genio/tests/test_spidev.py diff --git a/contrib/genio/bin/dvfs_gpu_check_governors.py b/contrib/genio/bin/dvfs_gpu_check_governors.py index 4e6b5aa932..c8c254013e 100755 --- a/contrib/genio/bin/dvfs_gpu_check_governors.py +++ b/contrib/genio/bin/dvfs_gpu_check_governors.py @@ -2,41 +2,43 @@ import argparse -GOVERNORS = ['userspace', 'powersave', 'performance', 'simple_ondemand'] -print(f'Expected Governos: {GOVERNORS}') +GOVERNORS = ["userspace", "powersave", "performance", "simple_ondemand"] +print(f"Expected Governors: {GOVERNORS}") def test_sysfs_attrs_read(soc): fail = 0 - mail_type 
= '13000000.mali' - if soc == 'mt8365': - mail_type = '13040000.mali' + mail_type = "13000000.mali" + if soc == "mt8365": + mail_type = "13040000.mali" node_path = ( - f'/sys/devices/platform/soc/{mail_type}/devfreq/{mail_type}/' - f'available_governors' + f"/sys/devices/platform/soc/{mail_type}/devfreq/{mail_type}/" + f"available_governors" ) with open(node_path) as f: for node in f.read().strip().split(): if node not in GOVERNORS: fail = 1 - print(f"Failed: found governor '{node}' out of expextation") + print( + f"Failed: found governor '{node}' out of expectation" + ) return fail def main(): parser = argparse.ArgumentParser() parser.add_argument( - 'soc', - help='SoC type. e.g mt8395', - choices=['mt8395', 'mt8390', 'mt8365'] + "soc", + help="SoC type. e.g mt8395", + choices=["mt8395", "mt8390", "mt8365"], ) args = parser.parse_args() ret = test_sysfs_attrs_read(args.soc) if ret: exit(1) - print('Pass') + print("Pass") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/contrib/genio/bin/linux_ccf.py b/contrib/genio/bin/linux_ccf.py index db2d6d08ab..29381b6e24 100755 --- a/contrib/genio/bin/linux_ccf.py +++ b/contrib/genio/bin/linux_ccf.py @@ -4,15 +4,8 @@ import argparse import subprocess -PLAINBOX_SESSION_SHARE = os.environ.get('PLAINBOX_SESSION_SHARE') -if not PLAINBOX_SESSION_SHARE: - print("no env var PLAINBOX_SESSION_SHARE") - exit(1) - -PLAINBOX_PROVIDER_DATA = os.environ.get('PLAINBOX_PROVIDER_DATA') -if not PLAINBOX_PROVIDER_DATA: - print("no env var PLAINBOX_PROVIDER_DATA") - exit(1) +PLAINBOX_SESSION_SHARE = os.environ.get("PLAINBOX_SESSION_SHARE") +PLAINBOX_PROVIDER_DATA = os.environ.get("PLAINBOX_PROVIDER_DATA") def runcmd(command): @@ -22,61 +15,81 @@ def runcmd(command): stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8", - timeout=1 + timeout=1, ) return ret +def check_env_variables(): + if not PLAINBOX_SESSION_SHARE: + print("no env var PLAINBOX_SESSION_SHARE") + exit(1) + if not PLAINBOX_PROVIDER_DATA: + 
print("no env var PLAINBOX_PROVIDER_DATA") + exit(1) + + def test_linux_ccf(soc): - if soc == 'mt8365': - print('mt8365 is not supported') + if soc == "mt8365": + print("mt8365 is not supported") exit(1) clk_summary_path = f"{PLAINBOX_SESSION_SHARE}/clk-summary.txt" cat_ret = runcmd( - [f"cat /sys/kernel/debug/clk/clk_summary | tee {clk_summary_path}"]) + [f"cat /sys/kernel/debug/clk/clk_summary | tee {clk_summary_path}"] + ) if cat_ret.returncode: - print(f'Failed: unable to dump clk_summary data to {clk_summary_path}') + print(f"Failed: unable to dump clk_summary data to {clk_summary_path}") exit(1) - print('Dump /sys/kernel/debug/clk/clk_summary:') + print("Dump /sys/kernel/debug/clk/clk_summary:") print(cat_ret.stdout) - if soc == 'mt8390': - verify_ret = runcmd([ - ( - f"verify-mt8188-ccf.sh" - f" -t {PLAINBOX_PROVIDER_DATA}/linux-ccf/mt8188-clk.h" - f" -s {clk_summary_path}" - ) - ]) - elif soc == 'mt8395' or soc == 'mt8195': - verify_ret = runcmd([ - ( - f"verify-mt8195-ccf.sh" - f" -t {PLAINBOX_PROVIDER_DATA}/linux-ccf/mt8195-clk.h" - f" -s {clk_summary_path}" - ) - ]) + if soc == "mt8390": + verify_ret = runcmd( + [ + ( + f"verify-mt8188-ccf.sh" + f" -t {PLAINBOX_PROVIDER_DATA}/linux-ccf/mt8188-clk.h" + f" -s {clk_summary_path}" + ) + ] + ) + elif soc == "mt8395" or soc == "mt8195": + verify_ret = runcmd( + [ + ( + f"verify-mt8195-ccf.sh" + f" -t {PLAINBOX_PROVIDER_DATA}/linux-ccf/mt8195-clk.h" + f" -s {clk_summary_path}" + ) + ] + ) if verify_ret.returncode: - print(f'Failed: {verify_ret.stdout}') + print(f"Failed: {verify_ret.stdout}") exit(1) - if verify_ret.stdout.split('\n')[0] \ - == '[-] Success, all clocks are mapped !': - print('Test Pass') + if ( + verify_ret.stdout.split("\n")[0] + != "[-] Success, all clocks are mapped !" + ): + print(f"Wrong output: {verify_ret.stdout}") + exit(1) + + print("Test Pass") def main(): parser = argparse.ArgumentParser() parser.add_argument( - 'soc', - help='SoC type. 
e.g mt8395', - choices=['mt8395', 'mt8390'] + "soc", + help="SoC type. e.g mt8395", + choices=["mt8395", "mt8195", "mt8390", "mt8390"], ) args = parser.parse_args() + check_env_variables() test_linux_ccf(args.soc) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/contrib/genio/bin/serialcheck.py b/contrib/genio/bin/serialcheck.py index 2468fb8f3c..cd363fbf2e 100755 --- a/contrib/genio/bin/serialcheck.py +++ b/contrib/genio/bin/serialcheck.py @@ -36,17 +36,18 @@ def test_uart_by_serialcheck(soc): 38400, 19200, 9600, 4800, 2400, 1200, 600, 300, 110 ] - fail = 0 + fail = False for br in available_baudrate: print('\n' + '*' * 80) print(f'Testing baudrate: {br}\n') ret = runcmd([cmd.format(tty_node, file_path, br)]) print(ret.stdout) if ret.returncode != 0 or ret.stdout.split('\n')[-2] != golden_msg: - fail = 1 + fail = True print('Fail: the output doesn\'t match the golden sample') - raise SystemExit(fail) + if fail: + raise SystemExit(1) def main(): diff --git a/contrib/genio/bin/spidev_test.py b/contrib/genio/bin/spidev_test.py index d4816cba52..1af658b837 100755 --- a/contrib/genio/bin/spidev_test.py +++ b/contrib/genio/bin/spidev_test.py @@ -43,10 +43,11 @@ def test_spi_content_consistency(platform): spi_ret = runcmd([cmd]) print(spi_ret.stdout) - packets = spi_ret.stdout.split('\n') - if not len(packets): + if not spi_ret.stdout: raise SystemExit( 'ERROR: no any output be reported') + + packets = spi_ret.stdout.split('\n') for rx, tx in zip(packets[-2:-1], packets[-3:-2]): tx_content = tx.split('|')[2] rx_content = rx.split('|')[2] diff --git a/contrib/genio/tests/test_dvfs_gpu_check_governors.py b/contrib/genio/tests/test_dvfs_gpu_check_governors.py new file mode 100644 index 0000000000..b0702f3c40 --- /dev/null +++ b/contrib/genio/tests/test_dvfs_gpu_check_governors.py @@ -0,0 +1,55 @@ +import unittest +from unittest.mock import mock_open, patch +import dvfs_gpu_check_governors as dvfs + + +class 
TestDvfsGpuCheckGovernors(unittest.TestCase): + + def test_all_expected_governors(self): + # Test when all expected governors are present + governors = "userspace powersave performance simple_ondemand" + with patch("builtins.open", mock_open(read_data=governors)): + result = dvfs.test_sysfs_attrs_read("mt8365") + self.assertEqual(result, 0) + + with patch("builtins.open", mock_open(read_data=governors)): + result = dvfs.test_sysfs_attrs_read("mt8364") + self.assertEqual(result, 0) + + def test_unexpected_governor(self): + # Test when an unexpected governor is present + governors = "userspace powersave performance unexpected_governor" + with patch("builtins.open", mock_open(read_data=governors)): + result = dvfs.test_sysfs_attrs_read("mt8365") + self.assertEqual(result, 1) + + @patch("builtins.open", mock_open(read_data="")) + def test_empty_file(self): + # Test when the file is empty + governors = "" + with patch("builtins.open", mock_open(read_data=governors)): + result = dvfs.test_sysfs_attrs_read("mt8365") + self.assertEqual(result, 0) + + @patch("dvfs_gpu_check_governors.test_sysfs_attrs_read") + def test_main(self, mock_attrs_read): + mock_attrs_read.return_value = 0 + with patch("sys.argv", ["script_name", "mt8395"]): + result = dvfs.main() + self.assertEqual(mock_attrs_read.call_count, 1) + self.assertEqual(result, None) + + @patch("dvfs_gpu_check_governors.test_sysfs_attrs_read") + def test_main_bad_args(self, mock_attrs_read): + with patch("sys.argv", ["script_name", "bad_soc"]): + with self.assertRaises(SystemExit): + dvfs.main() + self.assertEqual(mock_attrs_read.call_count, 0) + + @patch("dvfs_gpu_check_governors.test_sysfs_attrs_read") + def test_main_wrong_attrs(self, mock_attrs_read): + mock_attrs_read.return_value = 1 + with patch("sys.argv", ["script_name", "mt8395"]): + with self.assertRaises(SystemExit): + dvfs.main() + self.assertEqual(mock_attrs_read.call_count, 1) diff --git a/contrib/genio/tests/test_linux_ccf.py 
b/contrib/genio/tests/test_linux_ccf.py new file mode 100644 index 0000000000..59551accc1 --- /dev/null +++ b/contrib/genio/tests/test_linux_ccf.py @@ -0,0 +1,141 @@ +import unittest +from unittest.mock import patch, MagicMock +import linux_ccf as ccf + + +class TestLinuxCCF(unittest.TestCase): + @patch("linux_ccf.subprocess.run") + def test_runcmd(self, mock_run): + mock_run.return_value = MagicMock( + stdout="output", stderr="error", returncode=0 + ) + result = ccf.runcmd("echo Hello") + self.assertEqual(result.stdout, "output") + self.assertEqual(result.stderr, "error") + self.assertEqual(result.returncode, 0) + + @patch("linux_ccf.PLAINBOX_SESSION_SHARE", "/share") + @patch("linux_ccf.PLAINBOX_PROVIDER_DATA", "/tmp") + def test_check_env_variables(self): + self.assertEqual(ccf.check_env_variables(), None) + + @patch("linux_ccf.PLAINBOX_SESSION_SHARE", "") + @patch("linux_ccf.PLAINBOX_PROVIDER_DATA", "/tmp") + def test_check_session_share_not_defined(self): + with self.assertRaises(SystemExit): + ccf.check_env_variables() + + @patch("linux_ccf.PLAINBOX_SESSION_SHARE", "/share") + @patch("linux_ccf.PLAINBOX_PROVIDER_DATA", "") + def test_check_provider_data_not_defined(self): + with self.assertRaises(SystemExit): + ccf.check_env_variables() + + @patch("linux_ccf.runcmd") + def test_test_linux_ccf(self, mock_runcmd): + mock_runcmd.side_effect = [ + MagicMock(returncode=0), + MagicMock( + stdout="[-] Success, all clocks are mapped !", + stderr="", + returncode=0, + ), + ] + ccf.test_linux_ccf("mt8390") + mock_runcmd.assert_called() + + def test_test_linux_ccf_fails_with_mt8365(self): + with self.assertRaises(SystemExit): + ccf.test_linux_ccf("mt8365") + + @patch("linux_ccf.runcmd") + def test_test_linux_ccf_fail_clk_summary(self, mock_runcmd): + mock_runcmd.return_value = MagicMock( + stdout="", + stderr="error", + returncode=1, + ) + with self.assertRaises(SystemExit): + ccf.test_linux_ccf("mt8390") + + @patch("linux_ccf.runcmd") + 
@patch("linux_ccf.PLAINBOX_PROVIDER_DATA", "/tmp") + @patch("linux_ccf.PLAINBOX_SESSION_SHARE", "/share") + def test_test_linux_ccf_mt8390(self, mock_runcmd): + mock_runcmd.side_effect = [ + MagicMock(returncode=0), + MagicMock( + stdout="[-] Success, all clocks are mapped !", + stderr="", + returncode=0, + ), + ] + ccf.test_linux_ccf("mt8390") + mock_runcmd.assert_called_with( + [ + "verify-mt8188-ccf.sh -t /tmp/linux-ccf/mt8188-clk.h" + " -s /share/clk-summary.txt" + ] + ) + + @patch("linux_ccf.runcmd") + def test_test_linux_ccf_fail_verify(self, mock_runcmd): + mock_runcmd.side_effect = [ + MagicMock(returncode=0), + MagicMock(returncode=1), + ] + with self.assertRaises(SystemExit): + ccf.test_linux_ccf("mt8390") + + @patch("linux_ccf.runcmd") + def test_test_linux_ccf_fail_verify_wrong_output(self, mock_runcmd): + mock_runcmd.side_effect = [ + MagicMock(returncode=0), + MagicMock(stdout="", returncode=0), + ] + with self.assertRaises(SystemExit): + ccf.test_linux_ccf("mt8390") + + @patch("linux_ccf.runcmd") + @patch("linux_ccf.PLAINBOX_PROVIDER_DATA", "/tmp") + @patch("linux_ccf.PLAINBOX_SESSION_SHARE", "/share") + def test_test_linux_ccf_mt8395_or_mt8195(self, mock_runcmd): + mock_runcmd.side_effect = [ + MagicMock(returncode=0), + MagicMock( + stdout="[-] Success, all clocks are mapped !", + stderr="", + returncode=0, + ), + ] + cmd = [ + "verify-mt8195-ccf.sh -t /tmp/linux-ccf/mt8195-clk.h" + " -s /share/clk-summary.txt" + ] + ccf.test_linux_ccf("mt8195") + mock_runcmd.assert_called_with(cmd) + + @patch("linux_ccf.check_env_variables") + @patch("linux_ccf.test_linux_ccf") + def test_main(self, mock_test_linux_ccf, mock_check_env): + with patch("sys.argv", ["soc", "mt8395"]): + result = ccf.main() + self.assertEqual(mock_test_linux_ccf.call_count, 1) + self.assertEqual(result, None) + + @patch("linux_ccf.check_env_variables") + @patch("linux_ccf.test_linux_ccf") + def test_main_bad_args(self, mock_test_linux_ccf, mock_check_env): + with patch("sys.argv", 
["script_name", "bad_soc"]): + with self.assertRaises(SystemExit): + ccf.main() + mock_test_linux_ccf.assert_not_called() + + @patch("linux_ccf.check_env_variables") + @patch("linux_ccf.test_linux_ccf") + def test_main_wrong_ccf(self, mock_test_linux_ccf, mock_check_env): + mock_test_linux_ccf.side_effect = SystemExit(1) + with patch("sys.argv", ["script_name", "mt8395"]): + with self.assertRaises(SystemExit): + ccf.main() + mock_test_linux_ccf.assert_called_once_with("mt8395") diff --git a/contrib/genio/tests/test_pmic_regulator.py b/contrib/genio/tests/test_pmic_regulator.py new file mode 100644 index 0000000000..60f69d028c --- /dev/null +++ b/contrib/genio/tests/test_pmic_regulator.py @@ -0,0 +1,66 @@ +import unittest +from unittest.mock import patch, mock_open +import pmic_regulator + + +class TestRegulator(unittest.TestCase): + + @patch("os.path.exists") + @patch("builtins.open", new_callable=mock_open, read_data="attr_1") + def test_read_attr(self, mock_file, mock_exists): + mock_exists.return_value = True + result = pmic_regulator.read_attr("attribute") + self.assertEqual(result, "attr_1") + + @patch("os.path.exists") + @patch("builtins.open", new_callable=mock_open, read_data="") + def test_read_attr_not_exists(self, mock_file, mock_exists): + mock_exists.return_value = False + result = pmic_regulator.read_attr("attribute") + self.assertEqual(result, "") + + @patch("pmic_regulator.read_attr") + def test_read_all_name(self, mock_read_attr): + mock_read_attr.side_effect = ["node1", "node2", ""] + result = pmic_regulator.read_all_name() + self.assertEqual(result, {"node1", "node2"}) + + @patch("pmic_regulator.read_all_name") + def test_regulator(self, mock_read_all_name): + mock_read_all_name.return_value = pmic_regulator.mt8365_MAIN_REGULATORS + result = pmic_regulator.test_regulator("mt8365") + self.assertEqual(result, None) + + @patch("pmic_regulator.read_all_name") + def test_regulator_mt8390(self, mock_read_all_name): + mock_read_all_name.return_value = 
pmic_regulator.MAIN_REGULATORS + result = pmic_regulator.test_regulator("mt8390") + self.assertEqual(result, None) + + @patch("pmic_regulator.read_all_name") + def test_regulator_missing_node(self, mock_read_all_name): + mock_read_all_name.return_value = ["wrong_node"] + with self.assertRaises(SystemExit): + pmic_regulator.test_regulator("mt8365") + + @patch("pmic_regulator.test_regulator") + def test_main(self, mock_test_regulator): + with patch("sys.argv", ["script_name", "mt8395"]): + result = pmic_regulator.main() + self.assertEqual(mock_test_regulator.call_count, 1) + self.assertEqual(result, None) + + @patch("pmic_regulator.test_regulator") + def test_main_bad_args(self, mock_test_regulator): + with patch("sys.argv", ["script_name", "bad_soc"]): + with self.assertRaises(SystemExit): + pmic_regulator.main() + mock_test_regulator.assert_not_called() + + @patch("pmic_regulator.test_regulator") + def test_main_wrong_serialcheck(self, mock_test_regulator): + mock_test_regulator.side_effect = SystemExit(1) + with patch("sys.argv", ["script_name", "mt8395"]): + with self.assertRaises(SystemExit): + pmic_regulator.main() + mock_test_regulator.assert_called_once_with("mt8395") diff --git a/contrib/genio/tests/test_serialcheck.py b/contrib/genio/tests/test_serialcheck.py new file mode 100644 index 0000000000..40a29cc447 --- /dev/null +++ b/contrib/genio/tests/test_serialcheck.py @@ -0,0 +1,117 @@ +import unittest +from unittest.mock import patch, MagicMock +import serialcheck as sc + + +class TestSerialCheck(unittest.TestCase): + + @patch("serialcheck.subprocess.run") + def test_runcmd(self, mock_run): + mock_run.return_value = MagicMock( + stdout="output", stderr="error", returncode=0 + ) + result = sc.runcmd("echo Hello") + + mock_run.assert_called_once() + self.assertEqual(result.stdout, "output") + self.assertEqual(result.stderr, "error") + self.assertEqual(result.returncode, 0) + + @patch("serialcheck.runcmd") + @patch("os.environ.get") + def test_uart_by_sc(self, 
mock_get, mock_runcmd): + mock_get.return_value = "/tmp" + + # Mock the runcmd function to return the correct message + msg = ( + "cts: 0 dsr: 0 rng: 0 dcd: 0 rx: 12288 " + "tx: 12288 frame 0 ovr 0 par: 0 brk: 0 buf_ovrr: 0\n" + ) + # The first command is to copy the file, so we don't need the output + results = [""] + [MagicMock(stdout=msg, stderr="", returncode=0)] * 17 + mock_runcmd.side_effect = results + + self.assertEqual(sc.test_uart_by_serialcheck("mt8390"), None) + mock_runcmd.assert_called_with( + [ + "genio-test-tool.serialcheck -d /dev/ttyS2 -f /tmp/binary " + "-m d -l 3 -b 110" + ], + ) + + @patch("serialcheck.runcmd") + @patch("os.environ.get") + def test_uart_by_sc_mt8395(self, mock_get, mock_runcmd): + mock_get.return_value = "/tmp" + + # Mock the runcmd function to return the correct message + msg = ( + "cts: 0 dsr: 0 rng: 0 dcd: 0 rx: 12288 " + "tx: 12288 frame 0 ovr 0 par: 0 brk: 0 buf_ovrr: 0\n" + ) + # The first command is to copy the file, so we don't need the output + results = [""] + [MagicMock(stdout=msg, stderr="", returncode=0)] * 17 + mock_runcmd.side_effect = results + + self.assertEqual(sc.test_uart_by_serialcheck("mt8395"), None) + mock_runcmd.assert_called_with( + [ + "genio-test-tool.serialcheck -d /dev/ttyS1 -f /tmp/binary " + "-m d -l 3 -b 110" + ], + ) + + @patch("serialcheck.runcmd") + @patch("os.environ.get") + def test_uart_by_sc_bad_return_code(self, mock_get, mock_runcmd): + mock_get.return_value = "/tmp" + + # Mock the runcmd function to return a wrong message + msg = ( + "cts: 0 dsr: 0 rng: 0 dcd: 0 rx: 12288 " + "tx: 12288 frame 0 ovr 0 par: 0 brk: 0 buf_ovrr: 0\n" + ) + # The first command is to copy the file, so we don't need the output + results = [""] + [MagicMock(stdout=msg, stderr="", returncode=1)] * 17 + mock_runcmd.side_effect = results + + with self.assertRaises(SystemExit): + sc.test_uart_by_serialcheck("mt8395") + + @patch("serialcheck.runcmd") + @patch("os.environ.get") + def 
test_uart_by_sc_wrong_output(self, mock_get, mock_runcmd): + mock_get.return_value = "/tmp" + + # Mock the runcmd function to return a wrong message + msg = "output\nBad message\nend output" + # The first command is to copy the file, so we don't need the output + results = [""] + [MagicMock(stdout=msg, stderr="", returncode=0)] * 17 + mock_runcmd.side_effect = results + + with self.assertRaises(SystemExit): + sc.test_uart_by_serialcheck("mt8395") + + @patch("serialcheck.test_uart_by_serialcheck") + def test_main(self, mock_serialcheck): + mock_serialcheck.return_value = 0 + with patch("sys.argv", ["script_name", "mt8395"]): + result = sc.main() + self.assertEqual(mock_serialcheck.call_count, 1) + self.assertEqual(result, None) + + @patch("serialcheck.test_uart_by_serialcheck") + def test_main_bad_args(self, mock_serialcheck): + mock_serialcheck.return_value = 1 + with patch("sys.argv", ["script_name", "bad_soc"]): + with self.assertRaises(SystemExit): + sc.main() + mock_serialcheck.assert_not_called() + + @patch("serialcheck.test_uart_by_serialcheck") + def test_main_wrong_serialcheck(self, mock_serialcheck): + mock_serialcheck.side_effect = SystemExit(1) + with patch("sys.argv", ["script_name", "mt8395"]): + with self.assertRaises(SystemExit): + sc.main() + self.assertEqual(mock_serialcheck.call_count, 1) diff --git a/contrib/genio/tests/test_spidev.py b/contrib/genio/tests/test_spidev.py new file mode 100644 index 0000000000..949d431544 --- /dev/null +++ b/contrib/genio/tests/test_spidev.py @@ -0,0 +1,131 @@ +import unittest +from unittest.mock import patch, MagicMock +import spidev_test as spidev + + +class TestSpidev(unittest.TestCase): + + @patch("spidev_test.subprocess.run") + def test_runcmd(self, mock_run): + mock_run.return_value = MagicMock( + stdout="output", stderr="error", returncode=0 + ) + result = spidev.runcmd("echo Hello") + + mock_run.assert_called_once() + self.assertEqual(result.stdout, "output") + self.assertEqual(result.stderr, "error") + 
self.assertEqual(result.returncode, 0) + + @patch("spidev_test.os.path.exists") + def test_check_spi_node(self, mock_exists): + mock_exists.return_value = True + result = spidev.check_spi_node("/dev/spidev0.0") + self.assertEqual(result, None) + + @patch("spidev_test.os.path.exists") + def test_check_spi_node_fail(self, mock_exists): + mock_exists.return_value = False + with self.assertRaises(SystemExit): + spidev.check_spi_node("/dev/spidev0.0") + + @patch("spidev_test.PLAINBOX_PROVIDER_DATA", "/tmp") + @patch("spidev_test.runcmd") + @patch("spidev_test.check_spi_node") + def test_test_spi_content(self, mock_check_spi, mock_runcmd): + mock_check_spi.return_value = None + mock_runcmd.return_value = MagicMock( + stdout=( + "TX | FF FF FF FF FF FF | ......\n" + "RX | FF FF FF FF FF FF | ......\n" + ), + stderr="", + returncode=0, + ) + + result = spidev.test_spi_content_consistency("G1200-evk") + self.assertEqual(result, None) + mock_runcmd.assert_called_with( + [ + "genio-test-tool.spidev-test -D /dev/spidev1.0 -s 400000 -i " + "/tmp/spi/test.bin -v", + ], + ) + + @patch("spidev_test.PLAINBOX_PROVIDER_DATA", "/tmp") + @patch("spidev_test.runcmd") + @patch("spidev_test.check_spi_node") + def test_test_spi_content_G700(self, mock_check_spi, mock_runcmd): + mock_check_spi.return_value = None + mock_runcmd.return_value = MagicMock( + stdout=( + "TX | FF FF FF FF FF FF | ......\n" + "RX | FF FF FF FF FF FF | ......\n" + ), + stderr="", + returncode=0, + ) + + result = spidev.test_spi_content_consistency("G700") + self.assertEqual(result, None) + mock_runcmd.assert_called_with( + [ + "genio-test-tool.spidev-test -D /dev/spidev0.0 -s 400000 -i " + "/tmp/spi/test.bin -v", + ], + ) + + @patch("spidev_test.PLAINBOX_PROVIDER_DATA", "/tmp") + @patch("spidev_test.runcmd") + @patch("spidev_test.check_spi_node") + def test_test_spi_content_no_packets(self, mock_check_spi, mock_runcmd): + mock_check_spi.return_value = None + mock_runcmd.return_value = MagicMock( + stdout="", + 
stderr="", + returncode=0, + ) + with self.assertRaises(SystemExit): + spidev.test_spi_content_consistency("G1200-evk") + + @patch("spidev_test.PLAINBOX_PROVIDER_DATA", "/tmp") + @patch("spidev_test.runcmd") + @patch("spidev_test.check_spi_node") + def test_test_spi_content_no_consistency( + self, mock_check_spi, mock_runcmd + ): + mock_check_spi.return_value = None + mock_runcmd.return_value = MagicMock( + stdout=( + "TX | FF FF FF FF FF FF | ......\n" + "RX | 31 31 31 31 31 31 | 111111\n" + ), + stderr="", + returncode=0, + ) + with self.assertRaises(SystemExit): + spidev.test_spi_content_consistency("G1200-evk") + + @patch("spidev_test.test_spi_content_consistency") + def test_main(self, mock_spi_content): + mock_spi_content.return_value = 0 + with patch("sys.argv", ["platform", "G1200-evk"]): + result = spidev.main() + self.assertEqual(mock_spi_content.call_count, 1) + self.assertEqual(result, None) + + @patch("spidev_test.test_spi_content_consistency") + def test_main_bad_args(self, mock_spi_content): + mock_spi_content.return_value = 1 + with patch("sys.argv", ["script_name", "bad_soc"]): + with self.assertRaises(SystemExit): + spidev.main() + mock_spi_content.assert_not_called() + + @patch("spidev_test.test_spi_content_consistency") + def test_main_wrong_serialcheck(self, mock_spi_content): + mock_spi_content.side_effect = SystemExit(1) + with patch("sys.argv", ["script_name", "G1200-evk"]): + with self.assertRaises(SystemExit): + spidev.main() + self.assertEqual(mock_spi_content.call_count, 1) From 41b9f584b51c659bc8f2054aa32ea8654428801b Mon Sep 17 00:00:00 2001 From: Fernando Bravo <39527354+fernando79513@users.noreply.github.com> Date: Thu, 14 Mar 2024 14:42:20 +0100 Subject: [PATCH 081/108] Cover cpu idle genio (New) (#1063) * Fixed typo on cpu_idle * Added tests for cpu_idle --- contrib/genio/bin/cpu_idle.py | 4 +- contrib/genio/tests/test_cpu_idle.py | 273 +++++++++++++++++++++++++++ 2 files changed, 275 insertions(+), 2 deletions(-) create mode 
100644 contrib/genio/tests/test_cpu_idle.py diff --git a/contrib/genio/bin/cpu_idle.py b/contrib/genio/bin/cpu_idle.py index 529709e6dc..fd6e4e64bc 100755 --- a/contrib/genio/bin/cpu_idle.py +++ b/contrib/genio/bin/cpu_idle.py @@ -37,7 +37,7 @@ def error_handler(node_type, node_path, expect, reality): print( ( f"Failed: " - f"the expected {node_type} value of node '{node_path}'" + f"the expected {node_type} value of node '{node_path}' " f"should be '{expect}' but got '{reality}'" ) ) @@ -45,7 +45,7 @@ def error_handler(node_type, node_path, expect, reality): print( ( f"Failed: " - f"the expected usage value of node '{node_path}'" + f"the expected usage value of node '{node_path}' " f"should grater than 0" ) ) diff --git a/contrib/genio/tests/test_cpu_idle.py b/contrib/genio/tests/test_cpu_idle.py new file mode 100644 index 0000000000..3c3a877a10 --- /dev/null +++ b/contrib/genio/tests/test_cpu_idle.py @@ -0,0 +1,273 @@ +import unittest +from unittest.mock import patch, mock_open +import cpu_idle as cpu + + +class TestCpuIdle(unittest.TestCase): + @patch("os.path.exists") + def test_read_attr(self, mock_exists): + mock_exists.return_value = True + with patch("builtins.open", mock_open(read_data="test")): + self.assertEqual(cpu.read_attr("test_path"), "test") + + @patch("os.path.exists") + def test_read_attr_no_file(self, mock_exists): + mock_exists.return_value = False + self.assertEqual(cpu.read_attr("test_path"), "") + + @patch("cpu_idle.read_attr") + def test_read_attr_num(self, mock_read_attr): + mock_read_attr.return_value = "10" + self.assertEqual(cpu.read_attr_num("test_path"), 10) + + @patch("cpu_idle.read_attr") + def test_read_attr_num_no_file(self, mock_read_attr): + mock_read_attr.return_value = "" + self.assertEqual(cpu.read_attr_num("test_path"), -1) + + @patch("cpu_idle.read_attr") + def test_read_attr_num_str(self, mock_read_attr): + mock_read_attr.return_value = "test" + with self.assertRaises(ValueError): + cpu.read_attr_num("test_path") + + 
@patch("cpu_idle.read_attr") + def test_read_idle_attr(self, mock_read_attr): + mock_read_attr.return_value = "test" + self.assertEqual(cpu.read_idle_attr(0, 0, "name"), "test") + mock_read_attr.assert_called_once_with("cpu0/cpuidle/state0/name") + + @patch("cpu_idle.read_attr_num") + def test_read_idle_attr_num(self, mock_read_attr_num): + mock_read_attr_num.return_value = 10 + self.assertEqual(cpu.read_idle_attr_num(0, 0, "usage"), 10) + mock_read_attr_num.assert_called_once_with("cpu0/cpuidle/state0/usage") + + @patch("builtins.print") + def test_error_handler_with_name(self, mock_print): + cpu.error_handler("name", "path/to/node", "expect", "reality") + mock_print.assert_called_once_with( + "Failed: the expected name value of node 'path/to/node' should be " + "'expect' but got 'reality'" + ) + + @patch("builtins.print") + def test_error_handler_with_disable(self, mock_print): + cpu.error_handler("disable", "path/to/node", "expect", "reality") + mock_print.assert_called_once_with( + "Failed: the expected disable value of node 'path/to/node' " + "should be 'expect' but got 'reality'" + ) + + @patch("builtins.print") + def test_error_handler_with_usage(self, mock_print): + cpu.error_handler("usage", "path/to/node", "expect", "reality") + mock_print.assert_called_once_with( + "Failed: the expected usage value of node 'path/to/node' " + "should grater than 0" + ) + + def test_output_checker(self): + result = cpu.output_checker(0, 0, ("test", "test"), (0, 0), 1) + self.assertEqual(result, None) + + @patch("cpu_idle.error_handler") + def test_output_checker_name_mismatch(self, mock_error_handler): + with self.assertRaises(SystemExit): + cpu.output_checker(0, 0, ("test", "expected"), (0, 0), 1) + + @patch("cpu_idle.error_handler") + def test_output_checker_disable_mismatch(self, mock_error_handler): + with self.assertRaises(SystemExit): + cpu.output_checker(0, 0, ("test", "test"), (0, 1), 1) + + @patch("cpu_idle.error_handler") + def 
test_output_checker_usage_zero(self, mock_error_handler): + with self.assertRaises(SystemExit): + cpu.output_checker(0, 0, ("test", "test"), (0, 0), 0) + + @patch("cpu_idle.read_idle_attr") + @patch("cpu_idle.read_idle_attr_num") + @patch("cpu_idle.output_checker") + def test_mt8395_cases( + self, mock_output_checker, mock_read_attr_num, mock_read_attr + ): + mock_read_attr.side_effect = ["WFI", "0"] + mock_read_attr_num.return_value = 1 + cpu.test_wfi() + mock_output_checker.assert_called_with( + 0, 0, name=("WFI", "WFI"), disable=("0", "0"), usage=1 + ) + + self.assertEqual(cpu.test_mcdi_cpu("mt8395"), None) + + self.assertEqual(cpu.test_mcdi_cluster("mt8395"), None) + + self.assertEqual(cpu.test_dpidle("mt8395"), None) + + mock_read_attr.side_effect = ["clusteroff_l", "0"] + mock_read_attr_num.return_value = 1 + cpu.test_clusteroff_l("mt8395") + mock_output_checker.assert_called_with( + 0, + 2, + name=("clusteroff_l", "clusteroff_l"), + disable=("0", "0"), + usage=1, + ) + + mock_read_attr.side_effect = ["clusteroff_b", "0"] + mock_read_attr_num.return_value = 1 + cpu.test_clusteroff_b("mt8395") + mock_output_checker.assert_called_with( + 4, + 2, + name=("clusteroff_b", "clusteroff_b"), + disable=("0", "0"), + usage=1, + ) + + mock_read_attr.side_effect = ["cpuoff_l", "0"] + mock_read_attr_num.return_value = 1 + cpu.test_cpuoff_l("mt8395") + mock_output_checker.assert_called_with( + 0, 1, name=("cpuoff_l", "cpuoff_l"), disable=("0", "0"), usage=1 + ) + + mock_read_attr.side_effect = ["cpuoff_b", "0"] + mock_read_attr_num.return_value = 1 + cpu.test_cpuoff_b("mt8395") + mock_output_checker.assert_called_with( + 4, 1, name=("cpuoff_b", "cpuoff_b"), disable=("0", "0"), usage=1 + ) + + @patch("cpu_idle.read_idle_attr") + @patch("cpu_idle.read_idle_attr_num") + @patch("cpu_idle.output_checker") + def test_mt8390_cases( + self, mock_output_checker, mock_read_attr_num, mock_read_attr + ): + mock_read_attr.side_effect = ["WFI", "0"] + mock_read_attr_num.return_value = 1 
+ cpu.test_wfi() + mock_output_checker.assert_called_with( + 0, 0, name=("WFI", "WFI"), disable=("0", "0"), usage=1 + ) + + self.assertEqual(cpu.test_mcdi_cpu("mt8390"), None) + + self.assertEqual(cpu.test_mcdi_cluster("mt8390"), None) + + self.assertEqual(cpu.test_dpidle("mt8390"), None) + + mock_read_attr.side_effect = ["clusteroff-l", "0"] + mock_read_attr_num.return_value = 1 + cpu.test_clusteroff_l("mt8390") + mock_output_checker.assert_called_with( + 0, + 2, + name=("clusteroff-l", "clusteroff-l"), + disable=("0", "0"), + usage=1, + ) + + mock_read_attr.side_effect = ["clusteroff-b", "0"] + mock_read_attr_num.return_value = 1 + cpu.test_clusteroff_b("mt8390") + mock_output_checker.assert_called_with( + 6, + 2, + name=("clusteroff-b", "clusteroff-b"), + disable=("0", "0"), + usage=1, + ) + + mock_read_attr.side_effect = ["cpuoff-l", "0"] + mock_read_attr_num.return_value = 1 + cpu.test_cpuoff_l("mt8390") + mock_output_checker.assert_called_with( + 0, 1, name=("cpuoff-l", "cpuoff-l"), disable=("0", "0"), usage=1 + ) + + mock_read_attr.side_effect = ["cpuoff-b", "0"] + mock_read_attr_num.return_value = 1 + cpu.test_cpuoff_b("mt8390") + mock_output_checker.assert_called_with( + 6, 1, name=("cpuoff-b", "cpuoff-b"), disable=("0", "0"), usage=1 + ) + + @patch("cpu_idle.read_idle_attr") + @patch("cpu_idle.read_idle_attr_num") + @patch("cpu_idle.output_checker") + def test_mt8365_cases( + self, mock_output_checker, mock_read_attr_num, mock_read_attr + ): + mock_read_attr.side_effect = ["WFI", "0"] + mock_read_attr_num.return_value = 1 + cpu.test_wfi() + mock_output_checker.assert_called_with( + 0, 0, name=("WFI", "WFI"), disable=("0", "0"), usage=1 + ) + + mock_read_attr.side_effect = ["mcdi-cpu", "0"] + mock_read_attr_num.return_value = 1 + cpu.test_mcdi_cpu("mt8365") + mock_output_checker.assert_called_with( + 0, 1, name=("mcdi-cpu", "mcdi-cpu"), disable=("0", "0"), usage=1 + ) + + mock_read_attr.side_effect = ["mcdi-cluster", "0"] + mock_read_attr_num.return_value 
= 1 + cpu.test_mcdi_cluster("mt8365") + mock_output_checker.assert_called_with( + 0, + 2, + name=("mcdi-cluster", "mcdi-cluster"), + disable=("0", "0"), + usage=1, + ) + + mock_read_attr.side_effect = ["dpidle", "0"] + mock_read_attr_num.return_value = 1 + cpu.test_dpidle("mt8365") + mock_output_checker.assert_called_with( + 0, 3, name=("dpidle", "dpidle"), disable=("0", "0"), usage=1 + ) + + self.assertEqual(cpu.test_clusteroff_l("mt8365"), None) + + self.assertEqual(cpu.test_clusteroff_b("mt8365"), None) + + self.assertEqual(cpu.test_cpuoff_l("mt8365"), None) + + self.assertEqual(cpu.test_cpuoff_b("mt8365"), None) + + def test_main_cases(self): + cases = { + "wfi": "test_wfi", + "mcdi-cpu": "test_mcdi_cpu", + "mcdi-cluster": "test_mcdi_cluster", + "dpidle": "test_dpidle", + "clusteroff-l": "test_clusteroff_l", + "clusteroff-b": "test_clusteroff_b", + "cpuoff-l": "test_cpuoff_l", + "cpuoff-b": "test_cpuoff_b", + } + + for case, func in cases.items(): + args = ["soc", "mt8395", "--case", case] + with patch("cpu_idle." 
+ func) as mock_test: + with patch("sys.argv", args): + cpu.main() + mock_test.assert_called_once() + + @patch("cpu_idle.test_wfi", return_value=None) + def test_main_wrong_soc(self, mock_test_wfi): + with self.assertRaises(SystemExit): + with patch("sys.argv", ["soc", "bad_soc", "--case", "wfi"]): + cpu.main() + + def test_main_wrong_case(self): + with self.assertRaises(SystemExit): + with patch("sys.argv", ["soc", "mt8395", "--case", "bad_case"]): + cpu.main() From cf606f406c3b81ac7b08cd0b19847b5a81491231 Mon Sep 17 00:00:00 2001 From: Fernando Bravo <39527354+fernando79513@users.noreply.github.com> Date: Thu, 14 Mar 2024 14:45:32 +0100 Subject: [PATCH 082/108] Cover boot partition genio (New) (#1066) Added boot_partition unittests --- contrib/genio/tests/test_boot_partition.py | 200 +++++++++++++++++++++ 1 file changed, 200 insertions(+) create mode 100644 contrib/genio/tests/test_boot_partition.py diff --git a/contrib/genio/tests/test_boot_partition.py b/contrib/genio/tests/test_boot_partition.py new file mode 100644 index 0000000000..85ca49cc7c --- /dev/null +++ b/contrib/genio/tests/test_boot_partition.py @@ -0,0 +1,200 @@ +import unittest +from unittest.mock import patch, MagicMock +import boot_partition as bp + + +class TestBootPartition(unittest.TestCase): + + def setUp(self): + self.pbd = bp.TestPartedBootDevice() + + @patch("boot_partition.subprocess.run") + def test_runcmd(self, mock_run): + mock_run.return_value = MagicMock( + stdout="output", stderr="error", returncode=0 + ) + result = bp.runcmd("echo Hello") + + mock_run.assert_called_once() + self.assertEqual(result.stdout, "output") + self.assertEqual(result.stderr, "error") + self.assertEqual(result.returncode, 0) + + @patch("pathlib.Path.is_block_device") + def test_check_is_block_device(self, mock_is_block_device): + self.pbd.path = "/dev/sdc" + + mock_is_block_device.return_value = True + self.pbd.check_is_block_device() + + mock_is_block_device.return_value = False + with 
self.assertRaises(SystemExit): + self.pbd.check_is_block_device() + + @patch("boot_partition.TestPartedBootDevice.check_sector_size") + @patch("boot_partition.TestPartedBootDevice.check_partitions") + def test_check_disk(self, mock_cp, mock_css): + self.pbd.check_disk() + self.pbd.check_sector_size.assert_called_once() + self.pbd.check_partitions.assert_called_once() + + @patch("boot_partition.runcmd") + def test_get_disk_information(self, mock_runcmd): + json_data = ( + '{"disk": {"logical-sector-size": 4096, "physical-sector-size": ' + '4096, "partitions": [ {"number": 1, "name": "bootloaders"} ] } }' + ) + + mock_runcmd.return_value.stdout = json_data + + self.pbd.path = "/dev/sdc" + self.pbd.get_disk_information() + self.assertEqual( + self.pbd.expected_result, + self.pbd.expected_result_UFS, + "Failed to get expected result for UFS", + ) + + self.pbd.path = "/dev/mmcblk0" + self.pbd.get_disk_information() + self.assertEqual( + self.pbd.expected_result, + self.pbd.expected_result_EMMC, + "Failed to get expected result for EMMC", + ) + + self.pbd.path = "/dev/unknown" + with self.assertRaises(SystemExit): + self.pbd.get_disk_information() + + def test_check_sector_size(self): + self.pbd.expected_result = { + "logical-sector-size": 4096, + "physical-sector-size": 4096, + } + + # Correct sector size + self.pbd.actual_result = { + "logical-sector-size": 4096, + "physical-sector-size": 4096, + } + self.pbd.check_sector_size() + + # Different logical-sector-size + self.pbd.actual_result = { + "logical-sector-size": 8192, + "physical-sector-size": 4096, + } + with self.assertRaises(SystemExit): + self.pbd.check_sector_size() + + # Different logical-sector-size not found + self.pbd.actual_result = {"physical-sector-size": 4096} + with self.assertRaises(SystemExit): + self.pbd.check_sector_size() + + # Different physical-sector-size + self.pbd.actual_result = { + "logical-sector-size": 4096, + "physical-sector-size": 8192, + } + with self.assertRaises(SystemExit): + 
self.pbd.check_sector_size() + + # Different physical-sector-size not found + self.pbd.actual_result = {"logical-sector-size": 4096} + with self.assertRaises(SystemExit): + self.pbd.check_sector_size() + + def test_check_partitions(self): + self.pbd.expected_result = { + "partitions": [{"number": 1, "name": "bootloaders"}] + } + + # Correct partitions + self.pbd.actual_result = { + "partitions": [{"number": 1, "name": "bootloaders"}] + } + self.pbd.check_partitions() + + # Different length of partitions + self.pbd.actual_result = { + "partitions": [ + {"number": 1, "name": "bootloaders"}, + {"number": 9, "name": "writable"}, + ] + } + with self.assertRaises(SystemExit): + self.pbd.check_partitions() + + # Different partition number + self.pbd.actual_result = { + "partitions": [{"number": 2, "name": "bootloaders"}] + } + with self.assertRaises(SystemExit): + self.pbd.check_partitions() + + # Different partition name + self.pbd.actual_result = { + "partitions": [{"number": 1, "name": "bad_name"}] + } + with self.assertRaises(SystemExit): + self.pbd.check_partitions() + + # Different partitions not found + self.pbd.actual_result = {} + with self.assertRaises(SystemExit): + self.pbd.check_partitions() + + @patch("boot_partition.runcmd") + def test_check_device(self, mock_runcmd): + mock_runcmd.return_value.stdout = "sdc" + self.pbd.check_device(True) + + mock_runcmd.return_value.stdout = "mmcblk0" + self.pbd.check_device(True) + + mock_runcmd.return_value.stdout = "unknown" + self.pbd.check_device(False) + + mock_runcmd.return_value.stdout = "unknown" + with self.assertRaises(SystemExit): + self.pbd.check_device(True) + + @patch("boot_partition.TestPartedBootDevice.check_device") + @patch("boot_partition.TestPartedBootDevice.check_is_block_device") + @patch("boot_partition.TestPartedBootDevice.get_disk_information") + @patch("boot_partition.TestPartedBootDevice.check_disk") + def test_main_with_path( + self, mock_check_disk, mock_get_disk, mock_is_block, mock_check_dev
+ ): + args = ["script_name", "--path", "/dev/sda"] + with patch("sys.argv", args): + self.pbd.main() + mock_check_dev.assert_not_called() + mock_is_block.assert_called_once() + mock_get_disk.assert_called_once() + mock_check_disk.assert_called_once() + + @patch("boot_partition.TestPartedBootDevice.check_device") + @patch("boot_partition.TestPartedBootDevice.check_is_block_device") + def test_main_check_device(self, mock_is_block, mock_check_dev): + # Test with --check_device_name flag + args = ["script_name", "--check_device_name"] + with patch("sys.argv", args): + self.pbd.main() + mock_check_dev.assert_called_once() + mock_is_block.assert_not_called() + + @patch("boot_partition.TestPartedBootDevice.check_device") + def test_main_check_device_with_exit(self, mock_is_block): + # Test with --check_device_name flag + mock_is_block.return_value = None + args = ["script_name", "--check_device_name", "--exit_when_check_fail"] + with patch("sys.argv", args): + self.pbd.main() + + mock_is_block.side_effect = SystemExit + with self.assertRaises(SystemExit): + with patch("sys.argv", args): + self.pbd.main() \ No newline at end of file From 9ca358b9d16cc445499d8ad7bad9b93fc3731028 Mon Sep 17 00:00:00 2001 From: LiaoU3 <58060146+LiaoU3@users.noreply.github.com> Date: Fri, 15 Mar 2024 17:52:24 +0800 Subject: [PATCH 083/108] Fix field name for scaling_test and maxfreq_test log attach from after to depends (Bugfix) (#1069) Fix flags for scaling_test and maxfreq_test log attach --- providers/base/units/cpu/jobs.pxu | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/providers/base/units/cpu/jobs.pxu b/providers/base/units/cpu/jobs.pxu index dbddbec0c9..6e560970ce 100644 --- a/providers/base/units/cpu/jobs.pxu +++ b/providers/base/units/cpu/jobs.pxu @@ -56,7 +56,7 @@ plugin: attachment category_id: com.canonical.plainbox::cpu id: after-suspend-cpu/scaling_test-log-attach estimated_duration: 1.0 -after: after-suspend-cpu/scaling_test +depends:
after-suspend-cpu/scaling_test command: [[ -e "${PLAINBOX_SESSION_SHARE}"/scaling_test_after_suspend.log ]] && cat "${PLAINBOX_SESSION_SHARE}"/scaling_test_after_suspend.log _summary: Attach CPU scaling capabilities log @@ -116,7 +116,7 @@ plugin: attachment category_id: com.canonical.plainbox::cpu id: after-suspend-cpu/maxfreq_test-log-attach estimated_duration: 1.0 -after: after-suspend-cpu/maxfreq_test +depends: after-suspend-cpu/maxfreq_test command: [ -e "$PLAINBOX_SESSION_SHARE"/maxfreq_test.log ] && cat "$PLAINBOX_SESSION_SHARE"/maxfreq_test.log _summary: Attach CPU max frequency log From fbc7fd846aab01557a95ba703da08997d5b71d30 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Fri, 15 Mar 2024 17:17:30 +0100 Subject: [PATCH 084/108] Fix lp copy packages arbitrary lookback (infra) (#1067) * Use date_superseeded and order_by_date instead of time * Update testing also filtering * black test_lp_copy_packages.py --- tools/release/lp_copy_packages.py | 28 +++++++++++++++++++++----- tools/release/test_lp_copy_packages.py | 10 +++++++-- 2 files changed, 31 insertions(+), 7 deletions(-) diff --git a/tools/release/lp_copy_packages.py b/tools/release/lp_copy_packages.py index 47c8c0fd33..2d1761bbff 100755 --- a/tools/release/lp_copy_packages.py +++ b/tools/release/lp_copy_packages.py @@ -28,6 +28,7 @@ import sys import datetime import argparse +import itertools from utils import get_launchpad_client @@ -48,11 +49,28 @@ def get_ppa(lp, ppa_name: str, ppa_owner: str): def get_checkbox_packages(ppa): - since_date = datetime.datetime.now() - datetime.timedelta(weeks=4) - # The time ago is needed because else LP api will choke trying to - # return the full history including any published source in the ppa - return ppa.getPublishedSources( - created_since_date=since_date, source_name="checkbox" + """ + Get all the most recent checkbox packages on the PPA that are still current + + A source package is still current when it has not been superseeded by + another. 
The filtering here is done to avoid copying over outdated + packages to the target PPA + """ + # Note: this is not the same as ppa.getPublishedSources(status="Published") + # the reason is that if a package is Published but for a not + # supported distribution, say Lunar, copying it over will trigger an + # error. When a distribution support is dropped, Launchpad will + # automatically stop building for it and start a grace period for + # updates. This ensures there will always be a pocket of Superseded + # packages between Published packages for unsupported distro and + # current ones + all_published_sources = ppa.getPublishedSources( + source_name="checkbox", order_by_date=True + ) + # this filters out superseded packages AND Published packages that are no + # longer current (as they are not being built anymore by Launchpad) + return itertools.takewhile( + lambda x: x.date_superseded is None, all_published_sources ) diff --git a/tools/release/test_lp_copy_packages.py b/tools/release/test_lp_copy_packages.py index da760c088b..21545a4bae 100644 --- a/tools/release/test_lp_copy_packages.py +++ b/tools/release/test_lp_copy_packages.py @@ -12,9 +12,15 @@ def test_main(self, get_launchpad_client_mock): checkbox_dev_user = MagicMock() lp_client.people = {"checkbox-dev": checkbox_dev_user} - source = MagicMock() + source_to_copy = MagicMock(date_superseded=None) + source_no_copy_superseeded = MagicMock(date_superseded="some date") + source_no_copy_outdated_distro = MagicMock(date_superseded=None) + ppas = checkbox_dev_user.getPPAByName() - ppas.getPublishedSources.return_value = [source] * 5 + ppas.getPublishedSources.return_value = [source_to_copy] * 5 + [ + source_no_copy_superseeded, + source_no_copy_outdated_distro, + ] lp_copy_packages.main( ["checkbox-dev", "beta", "checkbox-dev", "stable"] From 44498aa07112594d4634a94af9be4134b9977128 Mon Sep 17 00:00:00 2001 From: Pierre Equoy Date: Sun, 17 Mar 2024 10:16:18 +0800 Subject: [PATCH 085/108] Re-introduce
wireless/check_iwlwifi_microcode_crash_{interface} template (BugFix) (#1058) Re-introduce wireless/check_iwlwifi_microcode_crash_{interface} template Commit da9c843 mistakenly removed this job. --- providers/base/units/wireless/jobs.pxu | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/providers/base/units/wireless/jobs.pxu b/providers/base/units/wireless/jobs.pxu index 09a952c46e..db307d27eb 100644 --- a/providers/base/units/wireless/jobs.pxu +++ b/providers/base/units/wireless/jobs.pxu @@ -576,3 +576,15 @@ command: estimated_duration: 2.0 depends: wireless/nm_connection_save_{interface} flags: preserve-locale also-after-suspend + +unit: template +template-resource: device +template-filter: device.driver == 'iwlwifi' +id: wireless/check_iwlwifi_microcode_crash_{interface} +_summary: Check there have been no iwlwifi crashes +plugin: shell +command: check-iwlwifi-microcode-sw-error.sh +category_id: com.canonical.plainbox::wireless +estimated_duration: 30.0 +flags: preserve-locale also-after-suspend +requires: package.name == 'systemd' From a12b050c58e8440285f2eb6ab4409ebfeea46d03 Mon Sep 17 00:00:00 2001 From: LiaoU3 <58060146+LiaoU3@users.noreply.github.com> Date: Mon, 18 Mar 2024 15:38:07 +0800 Subject: [PATCH 086/108] [checkbox-ce-oem] Make serial test (console and transmit data) automated (New) (#1055) --- .../bin/serial_config_parser.py | 58 +++++ .../bin/serial_test.py | 237 ++++++++++++------ .../units/serial/jobs.pxu | 91 +++++-- .../units/serial/manifest.pxu | 10 +- .../units/serial/test-plan.pxu | 60 +++-- .../units/test-plan-ce-oem.pxu | 6 +- 6 files changed, 326 insertions(+), 136 deletions(-) create mode 100755 contrib/checkbox-provider-ce-oem/bin/serial_config_parser.py diff --git a/contrib/checkbox-provider-ce-oem/bin/serial_config_parser.py b/contrib/checkbox-provider-ce-oem/bin/serial_config_parser.py new file mode 100755 index 0000000000..9d1cee6e43 --- /dev/null +++ b/contrib/checkbox-provider-ce-oem/bin/serial_config_parser.py @@ 
-0,0 +1,58 @@ +#!/usr/bin/env python3 +import argparse + + +def print_ports_config(string: str): + ports_config_list = string.split() + serials = [] + rs485_nodes = [] + rs422_nodes = [] + for port_config in ports_config_list: + config_parts = port_config.split(":") + if len(config_parts) != 3: + print( + "Error: Invalid format for serial port configuration:", + port_config, + ) + print("Should be 'TYPE:NODE:BAUDRATE'") + raise SystemExit(1) + serial = {} + port_type, port_node, baud_rate = config_parts + serial["type"] = port_type + serial["node"] = port_node + serial["baudrate"] = baud_rate + serials.append(serial) + if port_type == "RS485": + rs485_nodes.append(port_node) + if port_type == "RS422": + rs422_nodes.append(port_node) + + for serial in serials: + print("type: {}".format(serial["type"])) + print("node: {}".format(serial["node"])) + print("baudrate: {}".format(serial["baudrate"])) + print("group: ", end="") + if serial["type"] == "RS485": + for rs485_node in rs485_nodes: + if rs485_node != serial["node"]: + print("{} ".format(rs485_node), end="") + if serial["type"] == "RS422": + for rs422_node in rs422_nodes: + if rs422_node != serial["node"]: + print("{} ".format(rs422_node), end="") + print("\n") + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "string", + type=str, + help="The string needed to be parsed", + ) + args = parser.parse_args() + print_ports_config(args.string) + + +if __name__ == "__main__": + main() diff --git a/contrib/checkbox-provider-ce-oem/bin/serial_test.py b/contrib/checkbox-provider-ce-oem/bin/serial_test.py index d8afa419f0..9b69831bcc 100755 --- a/contrib/checkbox-provider-ce-oem/bin/serial_test.py +++ b/contrib/checkbox-provider-ce-oem/bin/serial_test.py @@ -5,19 +5,20 @@ # Written by: # Rick Wu # Stanley Huang +# Vincent Liao """ Whole idea of this RS485/232/422 remote test script is to connet -all rs485/232/422 that on DUT to the server(RPi 3). 
And test the +all RS232/422/485 that on DUT to the server. And test the port on DUT. """ import sys import argparse import serial +import serial.rs485 import time -import string -import random import logging +import os def init_logger(): @@ -50,148 +51,224 @@ def init_logger(): return root_logger -def str_generator(size): - chars = [] - chars.extend(string.ascii_uppercase) - chars.extend(string.ascii_lowercase) - chars.extend(string.digits) - chars.extend(string.punctuation) +class Serial: + def __init__( + self, + node, + type, + group: list = [], + baudrate: int = 115200, + bytesize: int = serial.EIGHTBITS, + parity: str = serial.PARITY_NONE, + stopbits: int = serial.STOPBITS_ONE, + timeout: int = 1, + data_size: int = 128, + ) -> None: + self.node = node + self.type = type + self.baudrate = baudrate + self.bytesize = bytesize + self.parity = parity + self.stopbits = stopbits + self.timeout = timeout + self.data_size = data_size + self.ser = self.serial_init(node) + self.group = [] + for ser in group: + self.group.append(self.serial_init(ser)) - return "".join(random.choices(chars, k=size)) + def serial_init(self, node: str) -> serial.Serial: + """Create a serial.Serial object based on the class variables""" + ser = serial.Serial( + node, + baudrate=self.baudrate, + bytesize=self.bytesize, + parity=self.parity, + stopbits=self.stopbits, + timeout=self.timeout, + ) + if self.type == "RS485": + ser.rs485_mode = serial.rs485.RS485Settings() + ser.reset_input_buffer() + ser.reset_output_buffer() + return ser + def send(self, data: bytes) -> None: + try: + self.ser.write(data) + logging.info("Sent: {}".format(data.decode())) + except Exception: + logging.exception("Not able to send data!") -def serial_init(device, **kwargs): - ser = serial.Serial( - device, - baudrate=kwargs.get("baudrate", 115200), - bytesize=kwargs.get("bytesize", 8), - parity=kwargs.get("parity", "N"), - stopbits=kwargs.get("stopbits", 1), - timeout=1, - write_timeout=1, - xonxoff=True - ) - return 
ser - - -def sender(ser, test_str): - try: - ser.write(test_str.encode("utf-8")) - logging.info("Sent: {}".format(test_str)) - except Exception: - logging.error("Not able to send data!") + def recv(self) -> bytes: + rcv = "" + try: + self.ser.rts = False + rcv = self.ser.read(self.data_size) + if rcv: + logging.info("Received: {}".format(rcv.decode())) + except Exception: + logging.exception("Received unmanageable string format") + raise SystemExit(1) + return rcv -def receiver(ser): - """ - If trying to receive string between two different protocols - (e.g. RS485 with RS232). Then it will receive the string - that is not able to decode. So we can handle that kind of - an exception to filter out the string from the different protocols. - """ - rcv = "" - try: - rcv = ser.readline().decode("utf-8") - if rcv: - logging.info("Received: {}".format(rcv)) - except ValueError: - logging.error("Received unmanageable string format") - rcv = "Error format" - return rcv - - -def server_mode(ser): +def server_mode(ser: Serial) -> None: """ Running as a server, it will be sniffing for received string. And it will send the same string out. usage: running on port /dev/ttyUSB0 as a server - $ sudo ./rs485-remote.py /dev/ttyUSB0 --mode server + $ sudo ./serial_test.py /dev/ttyUSB0 --mode server --type USB """ - logging.info("Listening on port {} ...".format(ser._port)) + logging.info("Listening on port {} ...".format(ser.node)) while True: - re_string = receiver(ser) - if re_string: + data = ser.recv() + if data: time.sleep(3) logging.info("Send string back ...") - sender(ser, re_string) - logging.info("Listening on port {} ...".format(ser._port)) - ser.reset_input_buffer() + ser.send(data) + logging.info("Listening on port {} ...".format(ser.node)) -def client_mode(ser, data_length): +def client_mode(ser: Serial, data_size: int = 128): """ Running as a clinet and it will sending out a string and wait the string send back from server. 
After receive the string, it will check the readback is correct or not. Usage: running on port /dev/ttymxc1 as a client - $ sudo ./rs485-remotr.py /dev/ttymxc1 --mode client + $ sudo ./serial_test.py /dev/ttymxc1 --mode client --type RS485 """ - test_str = "{}-{}".format(ser._port, str_generator(data_length)) - sender(ser, test_str) + data = os.urandom(data_size) + ser.send(data) for i in range(1, 6): logging.info("Attempting receive string... {} time".format(i)) + readback = ser.recv() time.sleep(3) - readback = receiver(ser) if readback: - if readback == test_str: - logging.info("Received string is correct!") + if readback == data: + logging.info("[PASS] Received string is correct!") raise SystemExit(0) else: - logging.error("Received string is incorrect!") + logging.error("[FAIL] Received string is incorrect!") raise SystemExit(1) - logging.error("Not able to receive string!!") + logging.error("[FAIL] Not able to receive string!!") raise SystemExit(1) +def console_mode(ser: Serial): + """ + Test the serial port when it is in console mode + This test requires DUT to loop back it self. + For example: connect the serial console port to the USB port via + serial to usb dongle + """ + try: + # Send 'Enter Key' + logging.info("Sending 'Enter Key'...") + ser.send(os.linesep.encode()) + response = ser.recv().decode() + # ":~$" is the pattern for the DUT after logging in + # "login:" is the pattern for the DUT before logging in + if ":~$" in response or "login:" in response: + logging.info("[PASS] Serial console test successful.") + else: + logging.error("[FAIL] Serial console test failed.") + logging.error( + "Expected response should contain ':~$' or 'login:'" + ) + raise SystemExit(1) + except Exception: + logging.exception("Caught an exception.") + raise SystemExit(1) + + def main(): - parser = argparse.ArgumentParser() - parser.add_argument('device', help='Serial port device e.g. 
/dev/ttyS1') + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser.add_argument("node", help="Serial port device node e.g. /dev/ttyS1") parser.add_argument( "--mode", - choices=["server", "client"], + choices=["server", "client", "console"], type=str, - help="set running mode, one if {server, client}", + help="Running mode", required=True, ) - parser.add_argument("--size", default=16, type=int) - parser.add_argument("--baudrate", default=115200, type=int) + parser.add_argument( + "--type", + type=str, + help="The type of serial port (e.g. RS485, RS422, RS232, USB)", + default="USB" + ) + parser.add_argument( + "--group", + type=str, + help="The group of serial ports that needed to be brought up also", + nargs="*", + default=[], + ) + parser.add_argument("--baudrate", + help="Baud rate for the serial ports", + default=115200, type=int) parser.add_argument( "--bytesize", - choices=[5, 6, 7, 8], + choices=[serial.FIVEBITS, serial.SIXBITS, + serial.SEVENBITS, serial.EIGHTBITS], type=int, - help="set bytesize, one of {5, 6, 7, 8}, default: 8", + help="Bytesize", default=8, ) parser.add_argument( "--parity", - choices=["N", "E", "O", "S", "M"], + choices=[serial.PARITY_NONE, serial.PARITY_EVEN, serial.PARITY_ODD, + serial.PARITY_MARK, serial.PARITY_SPACE], type=lambda c: c.upper(), - help="set parity, one of {N E O S M}, default: N", + help="Parity", default="N", ) parser.add_argument( "--stopbits", - choices=[1, 2], + choices=[serial.STOPBITS_ONE, serial.STOPBITS_TWO], + type=int, + help="Stopbits", + default=1, + ) + parser.add_argument( + "--datasize", + type=int, + help="Data size to send and receive", + default=128, + ) + parser.add_argument( + "--timeout", type=int, - help="set stopbits, one of {1, 2}, default: 1", + help="Timeout to receive", default=1, ) args = parser.parse_args() init_logger() - ser = serial_init( - args.device, + ser = Serial( + args.node, + args.type, + args.group, baudrate=args.baudrate, 
bytesize=args.bytesize, parity=args.parity, stopbits=args.stopbits, + timeout=args.timeout, + data_size=args.datasize, ) if args.mode == "server": server_mode(ser) + elif args.mode == "client": + client_mode(ser, data_size=args.datasize) + elif args.mode == "console": + console_mode(ser) else: - client_mode(ser, args.size) + raise SystemExit(1) if __name__ == "__main__": diff --git a/contrib/checkbox-provider-ce-oem/units/serial/jobs.pxu b/contrib/checkbox-provider-ce-oem/units/serial/jobs.pxu index 0d45780fa5..b381bb13c0 100644 --- a/contrib/checkbox-provider-ce-oem/units/serial/jobs.pxu +++ b/contrib/checkbox-provider-ce-oem/units/serial/jobs.pxu @@ -1,36 +1,85 @@ -id: ce-oem-serial/rs485-list -_summary: Generates a RS485 resource based on user supplied configuration +id: ce-oem-serial/serial-console-list +_summary: + Generates a serial console resource based on user supplied configuration _description: - A RS485 resource that relies on the user specifying the number of RS485 port. + A serial console resource that relies on the user + specifying the number of serial console port. This is to allow template jobs to then be instantiated. - Usage of parameter: {port1} {port2} - RS485_PORTS=/dev/ttymxc1 /dev/ttymxc2 + TYPE:NODE:BAUDRATE + SERIAL_CONSOLE_PORTS=USB:/dev/ttyUSB1:115200 plugin: resource estimated_duration: 1.0 +environ: + SERIAL_CONSOLE_PORTS command: - for x in $RS485_PORTS; do - echo "RS485: ${x}" - echo "" - done + if [ -z "$SERIAL_CONSOLE_PORTS" ]; then + exit 0 + fi + serial_config_parser.py "$SERIAL_CONSOLE_PORTS" unit: template -template-resource: ce-oem-serial/rs485-list +template-resource: ce-oem-serial/serial-console-list template-unit: job -id: ce-oem-serial/rs485-remote-{RS485} -_summary: To check the port {RS485} can working on RS485 half-duplex mode. -_purpose: - To check the port {RS485} can send and receive the string with - RS485 half-duplex mode. 
+template-engine: jinja2 +template-id: ce-oem-serial/serial-console-tests +id: ce-oem-serial/serial-console-{{ type }}-{{ node }}-{{ baudrate }} +imports: from com.canonical.plainbox import manifest +requires: + manifest.has_serial_console_loopback == True +template-summary: To check if the serial ports can work as a console +_summary: To check if the serial port {{ type }} ({{ node }}) can work as a console +_purpose: + To check the serial port {{ type }} ({{ node }}) can work as a console. _description: - Have to connect the RS485 A on DUT to RS485 A on RPi 3 server, - and RS485 B on DUT to RS485 B on RPi 3 server. + Have to connect the serial port back to itself + before starting this test plugin: shell user: root category_id: com.canonical.certification::serial estimated_duration: 30 flags: also-after-suspend -command: serial_test.py {RS485} --mode client -requires: - manifest.has_rs485_server == 'True' - manifest.has_rs485 == 'True' +command: + serial_test.py {{ node }} --mode console --type {{ type }} --baudrate {{ baudrate }} + +id: ce-oem-serial/serial-list +_summary: + Generates a serial resource based on user supplied configuration +_description: + A serial resource that relies on the user + specifying the number of serial port. + This is to allow template jobs to then be instantiated. 
+ TYPE:NODE:BAUDRATE + SERIAL_PORTS="RS485:/dev/ttyS0:9600 RS485:/dev/ttyS1:9600 RS232:/dev/ttyS2:115200" +plugin: resource +estimated_duration: 1.0 +environ: + SERIAL_PORTS +command: + if [ -z "$SERIAL_PORTS" ]; then + exit 0 + fi + serial_config_parser.py "$SERIAL_PORTS" + +unit: template +template-resource: ce-oem-serial/serial-list +template-unit: job +template-engine: jinja2 +template-id: ce-oem-serial/serial-transmit-data-tests +id: ce-oem-serial/serial-transmit-data-{{ type }}-{{ node }}-{{ baudrate }} imports: from com.canonical.plainbox import manifest +requires: + manifest.has_serial_ehco_server == True +template-summary: + Transmit data via {{ type }} ({{ node }}) with baudrate {{ baudrate }} +_purpose: + To check the serial port {{ type }} ({{ node }}) can transmit + data with baudrate {{ baudrate }} +_description: + Have to connect the serial port to serial testing server +plugin: shell +user: root +category_id: com.canonical.certification::serial +estimated_duration: 30 +flags: also-after-suspend +command: + serial_test.py {{ node }} --mode client --type {{ type }} --group {{ group }} --baudrate {{ baudrate }} diff --git a/contrib/checkbox-provider-ce-oem/units/serial/manifest.pxu b/contrib/checkbox-provider-ce-oem/units/serial/manifest.pxu index d8128ff81c..7f12958453 100644 --- a/contrib/checkbox-provider-ce-oem/units/serial/manifest.pxu +++ b/contrib/checkbox-provider-ce-oem/units/serial/manifest.pxu @@ -1,9 +1,9 @@ unit: manifest entry -id: has_rs485_server -_name: Has RS485 connect to RS485 test server? +id: has_serial_ehco_server +_name: Has the serial ports connected to the serial echo server? value-type: bool unit: manifest entry -id: has_rs485 -_name: Does platform supported RS485 serial? -value-type: bool \ No newline at end of file +id: has_serial_console_loopback +_name: Does serial console port loopback to itself?
+value-type: bool diff --git a/contrib/checkbox-provider-ce-oem/units/serial/test-plan.pxu b/contrib/checkbox-provider-ce-oem/units/serial/test-plan.pxu index 63042112ef..f1b93bf6e8 100644 --- a/contrib/checkbox-provider-ce-oem/units/serial/test-plan.pxu +++ b/contrib/checkbox-provider-ce-oem/units/serial/test-plan.pxu @@ -1,40 +1,48 @@ -id: ce-oem-rs485-full +id: ce-oem-serial-full unit: test plan -_name: RS485 remote test -_description: RS485 remote tests for devices +_name: Serial full tests +_description: + Full serial tests for devices +bootstrap_include: include: nested_part: - ce-oem-rs485-manual - ce-oem-rs485-automated - after-suspend-ce-oem-rs485-manual - after-suspend-ce-oem-rs485-automated + ce-oem-serial-automated -id: ce-oem-rs485-manual +# Currently this testpln is not nested in any testplan +# It stays here for future testplan refactoring to be nested +id: ce-oem-serial-automated unit: test plan -_name: RS485 remote manual tests -_description: Manual RS485 remote tests for devices -include: - -id: ce-oem-rs485-automated -unit: test plan -_name: RS485 remote auto tests -_description: Automated RS485 remote tests for devices +_name: Serial automated tests +_description: + Automated serial tests for devices + including before and after suspend bootstrap_include: - ce-oem-serial/rs485-list + ce-oem-serial/serial-list + ce-oem-serial/serial-console-list include: - ce-oem-serial/rs485-remote-.* + ce-oem-serial/serial-transmit-data-tests + ce-oem-serial/serial-console-tests -id: after-suspend-ce-oem-rs485-manual +id: before-suspend-ce-oem-serial-automated unit: test plan -_name: After suspend RS485 remote manual tests -_description: Manual after-suspend RS485 remote tests for devices +_name: Before suspend serial automated tests +_description: + Before suspend automated serial tests for devices +bootstrap_include: + ce-oem-serial/serial-list + ce-oem-serial/serial-console-list include: + ce-oem-serial/serial-transmit-data-.* + 
ce-oem-serial/serial-console-.* -id: after-suspend-ce-oem-rs485-automated +id: after-suspend-ce-oem-serial-automated unit: test plan -_name: After suspend RS485 remote auto tests -_description: Automated after-suspend RS485 remote tests for devices +_name: After suspend serial automated tests +_description: + After suspend automated serial tests for devices bootstrap_include: - ce-oem-serial/rs485-list + ce-oem-serial/serial-list + ce-oem-serial/serial-console-list include: - also-after-suspend-ce-oem-serial/rs485-remote-.* + after-suspend-ce-oem-serial/serial-transmit-data-.* + after-suspend-ce-oem-serial/serial-console-.* diff --git a/contrib/checkbox-provider-ce-oem/units/test-plan-ce-oem.pxu b/contrib/checkbox-provider-ce-oem/units/test-plan-ce-oem.pxu index 877767ca5c..feb7275571 100644 --- a/contrib/checkbox-provider-ce-oem/units/test-plan-ce-oem.pxu +++ b/contrib/checkbox-provider-ce-oem/units/test-plan-ce-oem.pxu @@ -36,7 +36,6 @@ nested_part: ce-oem-audio-manual ce-oem-otg-manual ce-oem-rtc-manual - ce-oem-rs485-manual ce-oem-eeprom-manual ce-oem-led-manual ce-oem-caam-manual @@ -66,7 +65,7 @@ nested_part: ce-oem-audio-automated ce-oem-otg-automated ce-oem-rtc-automated - ce-oem-rs485-automated + before-suspend-ce-oem-serial-automated ce-oem-eeprom-automated ce-oem-led-automated ce-oem-accelerator-automated @@ -96,7 +95,6 @@ nested_part: after-suspend-ce-oem-audio-manual after-suspend-ce-oem-otg-manual after-suspend-ce-oem-rtc-manual - after-suspend-ce-oem-rs485-manual after-suspend-ce-oem-eeprom-manual after-suspend-ce-oem-led-manual after-suspend-ce-oem-caam-manual @@ -124,7 +122,7 @@ nested_part: after-suspend-ce-oem-audio-automated after-suspend-ce-oem-otg-automated after-suspend-ce-oem-rtc-automated - after-suspend-ce-oem-rs485-automated + after-suspend-ce-oem-serial-automated after-suspend-ce-oem-eeprom-automated after-suspend-ce-oem-led-automated after-suspend-ce-oem-accelerator-automated From ad2452397e8e0df2eb93b46d05825ba074689aca Mon Sep 17 
00:00:00 2001 From: Massimiliano Date: Mon, 18 Mar 2024 09:08:44 +0100 Subject: [PATCH 087/108] Clearer autoresume text (bugfix) (#1070) * Update the comment of shell jobs appropriate to the outcome * Uppercase Checkbox * Also fix text in the tests * Better text --- checkbox-ng/plainbox/impl/session/remote_assistant.py | 9 +++++++++ .../plainbox/impl/session/test_remote_assistant.py | 11 +++++++++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/checkbox-ng/plainbox/impl/session/remote_assistant.py b/checkbox-ng/plainbox/impl/session/remote_assistant.py index 903b3f1933..abf0d35b23 100644 --- a/checkbox-ng/plainbox/impl/session/remote_assistant.py +++ b/checkbox-ng/plainbox/impl/session/remote_assistant.py @@ -768,8 +768,17 @@ def resume_by_id(self, session_id=None, overwrite_result_dict={}): if the_job.plugin == "shell": if "noreturn" in the_job.get_flag_set(): result_dict["outcome"] = IJobResult.OUTCOME_PASS + result_dict["comments"] = ( + "Job rebooted the machine or the Checkbox agent. " + "Resuming the session and marking it as passed " + "because the job has the `noreturn` flag" + ) else: result_dict["outcome"] = IJobResult.OUTCOME_CRASH + result_dict["comments"] = ( + "Job rebooted the machine or the Checkbox agent. " + "Resuming the session and marking it as crashed." + ) result_dict.update(overwrite_result_dict) result = MemoryJobResult(result_dict) diff --git a/checkbox-ng/plainbox/impl/session/test_remote_assistant.py b/checkbox-ng/plainbox/impl/session/test_remote_assistant.py index 7889ace3e7..06a3491932 100644 --- a/checkbox-ng/plainbox/impl/session/test_remote_assistant.py +++ b/checkbox-ng/plainbox/impl/session/test_remote_assistant.py @@ -249,7 +249,11 @@ def test_resume_by_id_with_result_no_file_noreturn( mjr = MemoryJobResult( { "outcome": IJobResult.OUTCOME_PASS, - "comments": "Automatically passed after resuming execution", + "comments": ( + "Job rebooted the machine or the Checkbox agent. 
" + "Resuming the session and marking it as passed " + "because the job has the `noreturn` flag" + ), } ) @@ -281,7 +285,10 @@ def test_resume_by_id_with_result_no_file_normal(self, mock_load_configs): mjr = MemoryJobResult( { "outcome": IJobResult.OUTCOME_CRASH, - "comments": "Automatically passed after resuming execution", + "comments": ( + "Job rebooted the machine or the Checkbox agent. " + "Resuming the session and marking it as crashed." + ), } ) From 9ec0b3c7dc637f005017f3f2945705e93c16d15a Mon Sep 17 00:00:00 2001 From: Isaac Yang <47034756+seankingyang@users.noreply.github.com> Date: Mon, 18 Mar 2024 16:12:54 +0800 Subject: [PATCH 088/108] Correct the suspend/resume threshold variable (Bugfix) (#1078) Correct the suspend/resume variable mapping in suspend cycles with reboot --- providers/base/units/stress/suspend_cycles_reboot.pxu | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/providers/base/units/stress/suspend_cycles_reboot.pxu b/providers/base/units/stress/suspend_cycles_reboot.pxu index 0f5c8ce694..1353324df6 100644 --- a/providers/base/units/stress/suspend_cycles_reboot.pxu +++ b/providers/base/units/stress/suspend_cycles_reboot.pxu @@ -57,8 +57,8 @@ environ: STRESS_S3_ITERATIONS STRESS_SUSPEND_REBOOT_ITERATIONS STRESS_SUSPEND_SL command: echo "reboot_iterations: ${STRESS_SUSPEND_REBOOT_ITERATIONS:-3}" echo "s3_iterations: ${STRESS_S3_ITERATIONS:-30}" - echo "resume_threshold: ${STRESS_SUSPEND_SLEEP_THRESHOLD:-10}" - echo "sleep_threshold: ${STRESS_SUSPEND_RESUME_THRESHOLD:-5}" + echo "resume_threshold: ${STRESS_SUSPEND_RESUME_THRESHOLD:-5}" + echo "sleep_threshold: ${STRESS_SUSPEND_SLEEP_THRESHOLD:-10}" echo estimated_duration: 1s From c6ba96972f96e4b1b0ebc8d5e97d3f1c4865ff29 Mon Sep 17 00:00:00 2001 From: Sylvain Pineau Date: Mon, 18 Mar 2024 09:48:52 +0100 Subject: [PATCH 089/108] Remove the dependency on Sylvain's ppa to get libraspberrypi0 (BugFix) (#1015) Fix: Remove the dependency on Sylvain's ppa to get libraspberrypi0 
The same packages are available from the checkbox stable ppa as of now: https://launchpad.net/~checkbox-dev/+archive/ubuntu/stable --- checkbox-core-snap/series16/snap/snapcraft.yaml | 8 -------- checkbox-core-snap/series18/snap/snapcraft.yaml | 8 -------- checkbox-core-snap/series20/snap/snapcraft.yaml | 8 -------- 3 files changed, 24 deletions(-) diff --git a/checkbox-core-snap/series16/snap/snapcraft.yaml b/checkbox-core-snap/series16/snap/snapcraft.yaml index b5e90ee438..a9ac69f502 100644 --- a/checkbox-core-snap/series16/snap/snapcraft.yaml +++ b/checkbox-core-snap/series16/snap/snapcraft.yaml @@ -404,15 +404,7 @@ parts: - picamera build-environment: - READTHEDOCS: 'True' - rpi-ppa: - plugin: nil - override-pull: | - sudo add-apt-repository ppa:sylvain-pineau/ppa-rpi - sudo apt-get update - build-packages: - - software-properties-common rpi-support-binaries: - after: [rpi-ppa] plugin: nil stage-packages: - on armhf: diff --git a/checkbox-core-snap/series18/snap/snapcraft.yaml b/checkbox-core-snap/series18/snap/snapcraft.yaml index 0c1117e6a0..d8772ed932 100644 --- a/checkbox-core-snap/series18/snap/snapcraft.yaml +++ b/checkbox-core-snap/series18/snap/snapcraft.yaml @@ -404,15 +404,7 @@ parts: - picamera build-environment: - READTHEDOCS: 'True' - rpi-ppa: - plugin: nil - override-pull: | - sudo add-apt-repository ppa:sylvain-pineau/ppa-rpi - sudo apt-get update - build-packages: - - software-properties-common rpi-support-binaries: - after: [rpi-ppa] plugin: nil stage-packages: - on armhf: diff --git a/checkbox-core-snap/series20/snap/snapcraft.yaml b/checkbox-core-snap/series20/snap/snapcraft.yaml index 071a9769b7..a3f3db83e8 100644 --- a/checkbox-core-snap/series20/snap/snapcraft.yaml +++ b/checkbox-core-snap/series20/snap/snapcraft.yaml @@ -437,15 +437,7 @@ parts: python3 manage.py install --layout=relocatable --prefix=/providers/checkbox-provider-certification-server --root="$SNAPCRAFT_PART_INSTALL" after: [checkbox-provider-certification-client] 
################################################################################ - rpi-ppa: - plugin: nil - override-pull: | - sudo add-apt-repository ppa:sylvain-pineau/ppa-rpi - sudo apt-get update - build-packages: - - software-properties-common rpi-support-binaries: - after: [rpi-ppa] plugin: nil stage-packages: - on armhf: From 0c6b0d96820724e4318fb82e2386d45b1e476609 Mon Sep 17 00:00:00 2001 From: Fernando Bravo <39527354+fernando79513@users.noreply.github.com> Date: Mon, 18 Mar 2024 17:13:00 +0100 Subject: [PATCH 090/108] Cover brightness genio test (New) (#1071) * Modified brightness test to create the unit tests - Moved brightness test to a separated function - Error not raised on successful execution * Added unit tests for brightness_test.py * mock the Brightness object instead of patching all the methods. * Removed MagicMock duplicates --- contrib/genio/bin/brightness_test.py | 100 ++++++------ contrib/genio/tests/test_brightness.py | 218 +++++++++++++++++++++++++ 2 files changed, 271 insertions(+), 47 deletions(-) create mode 100644 contrib/genio/tests/test_brightness.py diff --git a/contrib/genio/bin/brightness_test.py b/contrib/genio/bin/brightness_test.py index 1883efebde..48006e5e3e 100755 --- a/contrib/genio/bin/brightness_test.py +++ b/contrib/genio/bin/brightness_test.py @@ -119,6 +119,58 @@ def was_brightness_applied(self, interface): else: return 0 + def brightness_test(self, target_interface): + # If no backlight interface can be found + if len(self.interfaces) == 0: + raise SystemExit("ERROR: no brightness interfaces found") + + exit_status = 0 + find_target_display = False + print(f'Available Interfaces: {self.interfaces}') + for interface in self.interfaces: + if target_interface in interface: + find_target_display = True + # Get the current brightness which we can restore later + original_brightness = self.get_actual_brightness(interface) + print(f'Current brightness: {original_brightness}') + + # Get the maximum value for brightness + 
max_brightness = self.get_max_brightness(interface) + print(f'Maximum brightness: {max_brightness}\n') + + for m in [0, 0.25, 0.5, 0.75, 1]: + # Set the brightness to half the max value + current_brightness = math.ceil(max_brightness * m) + print(f'Set the brightness as {current_brightness}') + self.write_value( + current_brightness, + os.path.join(interface, 'brightness')) + + # Check that "actual_brightness" reports the same value we + # set "brightness" to + exit_status += self.was_brightness_applied(interface) + + # Wait a little bit before going back to the original value + time.sleep(2) + + # Set the brightness back to its original value + self.write_value( + original_brightness, + os.path.join(interface, 'brightness')) + print( + 'Set brightness back to original value:' + f'{original_brightness}' + ) + # Close the loop since the target display has been tested + break + + if not find_target_display: + raise SystemExit( + f"ERROR: no {target_interface} interface be found" + ) + if exit_status: + raise SystemExit(exit_status) + def main(): parser = ArgumentParser(formatter_class=RawTextHelpFormatter) @@ -167,53 +219,7 @@ def main(): f"ERROR: no suitable interface of {args.display} display") brightness = Brightness() - - # If no backlight interface can be found - if len(brightness.interfaces) == 0: - raise SystemExit("ERROR: no brightness interfaces found") - - exit_status = 0 - find_target_display = False - print(f'Available Interfaces: {brightness.interfaces}') - for interface in brightness.interfaces: - if target_interface in interface: - find_target_display = True - # Get the current brightness which we can restore later - original_brightness = brightness.get_actual_brightness(interface) - print(f'Current brightness: {original_brightness}') - - # Get the maximum value for brightness - max_brightness = brightness.get_max_brightness(interface) - print(f'Maximum brightness: {max_brightness}\n') - - for m in [0, 0.25, 0.5, 0.75, 1]: - # Set the brightness to 
half the max value - current_brightness = math.ceil(max_brightness * m) - print(f'Set the brightness as {current_brightness}') - brightness.write_value( - current_brightness, - os.path.join(interface, 'brightness')) - - # Check that "actual_brightness" reports the same value we - # set "brightness" to - exit_status += brightness.was_brightness_applied(interface) - - # Wait a little bit before going back to the original value - time.sleep(2) - - # Set the brightness back to its original value - brightness.write_value( - original_brightness, - os.path.join(interface, 'brightness')) - print( - f'Set brightness back to original value: {original_brightness}' - ) - # Close the loop since the target display has been tested - break - - if not find_target_display: - raise SystemExit(f"ERROR: no {target_interface} interface be found") - raise SystemExit(exit_status) + brightness.brightness_test(target_interface) if __name__ == '__main__': diff --git a/contrib/genio/tests/test_brightness.py b/contrib/genio/tests/test_brightness.py new file mode 100644 index 0000000000..5e7d27e235 --- /dev/null +++ b/contrib/genio/tests/test_brightness.py @@ -0,0 +1,218 @@ +import unittest +import os +from unittest.mock import patch, mock_open, MagicMock +from brightness_test import Brightness, main + + +class TestBrightness(unittest.TestCase): + + def test_init(self): + mock_brightness = MagicMock() + mock_brightness._get_interfaces_from_path.return_value = [ + "/sys/class/backlight/interface1", + "/sys/class/backlight/interface2", + ] + Brightness.__init__(mock_brightness) + self.assertEqual( + mock_brightness.interfaces, + [ + "/sys/class/backlight/interface1", + "/sys/class/backlight/interface2", + ], + ) + + def test_read_value(self): + mock_brightness = MagicMock() + data = "100\n" + with patch("builtins.open", mock_open(read_data=data)): + self.assertEqual( + Brightness.read_value(mock_brightness, "test_path"), 100 + ) + + # The test raises a value error if the data is None + file = 
None + with self.assertRaises(ValueError): + Brightness.read_value(mock_brightness, file) + + # The test can handle a file object + mock_file = MagicMock() + mock_file.write = "" + mock_file.readlines.return_value = ["100\n"] + self.assertEqual( + Brightness.read_value(mock_brightness, mock_file), 100 + ) + + @patch("builtins.open") + def test_write_value(self, mock_open): + mock_brightness = MagicMock() + mock_file = MagicMock() + mock_open.return_value = mock_file + + Brightness.write_value(mock_brightness, 100, "test_path") + mock_open.assert_called_once_with("test_path", "w") + mock_file.write.assert_called_once_with("100") + + # The file is opened in append mode if the test argument is True + Brightness.write_value(mock_brightness, 100, "test_path", True) + mock_open.assert_called_with("test_path", "a") + + # The test can handle a file object + Brightness.write_value(mock_brightness, 100, mock_file) + mock_file.write.assert_called_with("100") + + def test_get_max_brightness(self): + mock_brightness = MagicMock() + mock_brightness.read_value.return_value = 100 + self.assertEqual( + Brightness.get_max_brightness(mock_brightness, "test_path"), 100 + ) + + def test_get_actual_brightness(self): + mock_brightness = MagicMock() + mock_brightness.read_value.return_value = 100 + self.assertEqual( + Brightness.get_actual_brightness(mock_brightness, "test_path"), 100 + ) + + def test_get_last_set_brightness(self): + mock_brightness = MagicMock() + mock_brightness.read_value.return_value = 100 + self.assertEqual( + Brightness.get_last_set_brightness(mock_brightness, "test_path"), + 100, + ) + + @patch("os.path.isdir") + @patch("brightness_test.glob") + def test_get_interfaces_from_path(self, mock_glob, mock_isdir): + mock_brightness = MagicMock() + mock_isdir.return_value = True + mock_glob.return_value = ["/sys/class/backlight/interface1"] + self.assertEqual( + Brightness._get_interfaces_from_path( + mock_brightness, + ), + ["/sys/class/backlight/interface1"], + ) + + # 
Returns an empty list if the path is not a directory + mock_isdir.return_value = False + self.assertEqual( + Brightness._get_interfaces_from_path( + mock_brightness, + ), + [], + ) + + # Returns an empty list if there are no directories in the path + mock_isdir.side_effect = [True, False] + self.assertEqual( + Brightness._get_interfaces_from_path( + mock_brightness, + ), + [], + ) + + def test_was_brightness_applied(self): + mock_brightness = MagicMock() + mock_brightness.get_actual_brightness.return_value = 100 + mock_brightness.get_last_set_brightness.return_value = 100 + self.assertEqual( + Brightness.was_brightness_applied(mock_brightness, "test_path"), 0 + ) + + mock_brightness.get_actual_brightness.return_value = 100 + mock_brightness.get_last_set_brightness.return_value = 105 + self.assertEqual( + Brightness.was_brightness_applied(mock_brightness, "test_path"), 1 + ) + + @patch("time.sleep", MagicMock()) + def test_brightness(self): + mock_brightness = MagicMock() + mock_brightness.interfaces = [ + "/sys/class/backlight/interface1", + "/sys/class/backlight/interface2", + ] + mock_brightness.get_actual_brightness.return_value = 100 + mock_brightness.get_max_brightness.return_value = 200 + mock_brightness.was_brightness_applied.return_value = 0 + + target_interface = "/sys/class/backlight/interface1" + Brightness.brightness_test(mock_brightness, target_interface) + + mock_brightness.get_actual_brightness.assert_called_once_with( + target_interface + ) + mock_brightness.get_max_brightness.assert_called_once_with( + target_interface + ) + mock_brightness.write_value.assert_called_with( + 100, os.path.join(target_interface, "brightness") + ) + self.assertEqual(mock_brightness.was_brightness_applied.call_count, 5) + + # Test the case where the brightness was not applied + mock_brightness.was_brightness_applied.return_value = 1 + with self.assertRaises(SystemExit): + Brightness.brightness_test(mock_brightness, target_interface) + + def 
test_brightness_no_interfaces(self): + mock_brightness = MagicMock() + mock_brightness.interfaces = [] + target_interface = "/sys/class/backlight/interface1" + with self.assertRaises(SystemExit) as cm: + Brightness.brightness_test(mock_brightness, target_interface) + self.assertIn("ERROR", str(cm.exception)) + + def test_brightness_no_target_interface(self): + mock_brightness = MagicMock() + mock_brightness.interfaces = [ + "/sys/class/backlight/interface1", + "/sys/class/backlight/interface2", + ] + target_interface = "/sys/class/backlight/interface3" + with self.assertRaises(SystemExit) as cm: + Brightness.brightness_test(mock_brightness, target_interface) + self.assertIn("ERROR", str(cm.exception)) + + @patch("os.geteuid") + @patch("brightness_test.Brightness") + def test_main(self, mock_brightness, mock_getuid): + mock_getuid.return_value = 0 + argv = ["script_name", "-p", "G1200-evk", "-d", "dsi"] + with patch("sys.argv", argv): + main() + self.assertEqual(mock_brightness.call_count, 1) + + @patch("os.geteuid") + @patch("brightness_test.Brightness", MagicMock()) + def test_main_bad_args(self, mock_getuid): + mock_getuid.return_value = 0 + argv = ["script_name", "-p", "bad_platform", "-d", "dsi"] + with patch("sys.argv", argv): + with self.assertRaises(SystemExit): + main() + + argv = ["script_name", "-p", "G1200-evk", "-d", "bad_display"] + with patch("sys.argv", argv): + with self.assertRaises(SystemExit): + main() + + @patch("os.geteuid") + @patch("brightness_test.Brightness", MagicMock()) + def test_main_no_root(self, mock_getuid): + mock_getuid.return_value = 1 + argv = ["script_name", "-p", "G1200-evk", "-d", "dsi"] + with patch("sys.argv", argv): + with self.assertRaises(SystemExit): + main() + + @patch("os.geteuid") + @patch("brightness_test.Brightness", MagicMock()) + def test_main_wrong_interfaces(self, mock_getuid): + mock_getuid.return_value = 0 + argv = ["script_name", "-p", "G350", "-d", "lvds"] + with patch("sys.argv", argv): + with 
self.assertRaises(SystemExit): + main() From d522d71d14e2bfafdff8eebd7eab8fa69f4ea6dc Mon Sep 17 00:00:00 2001 From: Fernando Bravo <39527354+fernando79513@users.noreply.github.com> Date: Mon, 18 Mar 2024 17:13:10 +0100 Subject: [PATCH 091/108] Cover loopback genio test (New) (#1081) * Removed SystemExit on passing test and removed typo * Add unit tests for GPIO loopback functionality * mock the GPIOSysFsController object instead of patching all the methods. --- contrib/genio/bin/gpio_loopback_test.py | 6 +- contrib/genio/tests/test_gpio_loopback.py | 211 ++++++++++++++++++++++ 2 files changed, 214 insertions(+), 3 deletions(-) create mode 100644 contrib/genio/tests/test_gpio_loopback.py diff --git a/contrib/genio/bin/gpio_loopback_test.py b/contrib/genio/bin/gpio_loopback_test.py index 2f042a2e1a..a2abcf9a3a 100755 --- a/contrib/genio/bin/gpio_loopback_test.py +++ b/contrib/genio/bin/gpio_loopback_test.py @@ -81,8 +81,8 @@ def run_test( print("Input Pin Number: {} + Base Number = {}".format( gpio_input_pin, input_pin_number)) print("\n# Start GPIO loopback test") - raise SystemExit( - not self.loopback_test(output_pin_number, input_pin_number)) + if not self.loopback_test(output_pin_number, input_pin_number): + raise SystemExit("Failed: GPIO loopback test failed") def check_gpio_node(self, port): """Check the GPIO port is exists @@ -172,7 +172,7 @@ def loopback_test(self, out_port, in_port): self.configure_gpio(in_port, "in") for state in self.TEST_STATES: - print("Try to send and receivce {}".format(state)) + print("Try to send and receive {}".format(state)) value = self.read_gpio(in_port) print("The initial input GPIO {}'s value is {}".format( in_port, value)) diff --git a/contrib/genio/tests/test_gpio_loopback.py b/contrib/genio/tests/test_gpio_loopback.py new file mode 100644 index 0000000000..ae11b88e68 --- /dev/null +++ b/contrib/genio/tests/test_gpio_loopback.py @@ -0,0 +1,211 @@ +import textwrap +import unittest +from unittest.mock import patch, mock_open, 
MagicMock +from gpio_loopback_test import GPIOSysFsController, main + + +class TestGpioLoopback(unittest.TestCase): + + def test_get_gpio_base_number(self): + mock_gpio_controller = MagicMock() + data = textwrap.dedent( + """ + gpiochip0: GPIOs 0-31, ID1, ID2: + gpio-0 ( |sysfs ) in hi + gpio-1 ( |sysfs ) out hi + gpiochip1: GPIOs 32-63, ID3, ID4: + gpio-32 ( |sysfs ) in hi + gpio-33 ( |sysfs ) out hi + """ + ) + with patch("builtins.open", mock_open(read_data=data)): + self.assertEqual( + GPIOSysFsController.get_gpio_base_number(mock_gpio_controller), + { + "gpiochip0": "0", + "gpiochip1": "32", + }, + ) + + @patch("builtins.print") + def test_run_test(self, mock_print): + mock_gpio_controller = MagicMock() + mock_gpio_controller.get_gpio_base_number.return_value = { + "gpiochip0": "0", + "gpiochip1": "32", + } + mock_gpio_controller.loopback_test.return_value = True + + output_gpio_chip_number = "0" + input_gpio_chip_number = "1" + physical_output_port = "J1" + physical_input_port = "J2" + gpio_output_pin = "1" + gpio_input_pin = "2" + + GPIOSysFsController.run_test( + mock_gpio_controller, + output_gpio_chip_number, + input_gpio_chip_number, + physical_output_port, + physical_input_port, + gpio_output_pin, + gpio_input_pin, + ) + + print_calls = [ + "\nOutput Base Number: 0", + "Input Base Number: 32", + "Physical output port: J1, GPIO number: 1", + "Physical input port: J2, GPIO number 2", + "Output Pin Number: 1 + Base Number = 1", + "Input Pin Number: 2 + Base Number = 34", + "\n# Start GPIO loopback test", + ] + + actual_calls = [call.args[0] for call in mock_print.call_args_list] + + self.assertEqual(actual_calls, print_calls) + + def test_run_test_fail(self): + mock_gpio_controller = MagicMock() + mock_gpio_controller.get_gpio_base_number.return_value = { + "gpiochip0": "0", + "gpiochip1": "32", + } + mock_gpio_controller.loopback_test.return_value = False + + with self.assertRaises(SystemExit): + GPIOSysFsController.run_test( + mock_gpio_controller, "0", 
"1", "J1", "J2", "1", "2" + ) + + @patch("os.path.exists") + def test_check_gpio_node(self, mock_exists): + mock_gpio_controller = MagicMock() + mock_exists.return_value = True + self.assertTrue( + GPIOSysFsController.check_gpio_node(mock_gpio_controller, "test") + ) + + def test_set_gpio(self): + mock_gpio_controller = MagicMock() + mock_gpio_controller.ROOT_PATH = "/sys/class/gpio" + with patch("builtins.open", mock_open()) as mock_file: + GPIOSysFsController.set_gpio(mock_gpio_controller, "test", "1") + mock_file.assert_called_once_with( + "/sys/class/gpio/gpio{}/value".format("test"), "wt" + ) + mock_file().write.assert_called_once_with("1\n") + + def test_read_gpio(self): + mock_gpio_controller = MagicMock() + mock_gpio_controller.ROOT_PATH = "/sys/class/gpio" + with patch("builtins.open", mock_open(read_data="1")) as mock_file: + self.assertEqual( + GPIOSysFsController.read_gpio(mock_gpio_controller, "test"), + "1", + ) + mock_file.assert_called_once_with( + "/sys/class/gpio/gpio{}/value".format("test"), "r" + ) + + def test_set_direction(self): + mock_gpio_controller = MagicMock() + mock_gpio_controller.ROOT_PATH = "/sys/class/gpio" + with patch("builtins.open", mock_open()) as mock_file: + GPIOSysFsController.set_direction( + mock_gpio_controller, "test", "out" + ) + mock_file.assert_called_once_with( + "/sys/class/gpio/gpio{}/direction".format("test"), "w" + ) + mock_file().write.assert_called_once_with("out\n") + + @patch("builtins.open") + def test_configure_gpio(self, mock_open): + mock_gpio_controller = MagicMock() + mock_gpio_controller.ROOT_PATH = "/sys/class/gpio" + mock_gpio_controller.check_gpio_node.return_value = True + GPIOSysFsController.configure_gpio(mock_gpio_controller, "port", "dir") + mock_open.assert_not_called() + mock_gpio_controller.set_direction.assert_called_once_with( + "port", "dir" + ) + + # If the GPIO node does not exist, it should be created + mock_gpio_controller.check_gpio_node.side_effect = [False, True] + 
GPIOSysFsController.configure_gpio(mock_gpio_controller, "port", "dir") + mock_open.assert_called_once_with("/sys/class/gpio/export", "w") + with mock_open() as mock_file: + mock_file.write.assert_called_once_with("port\n") + mock_gpio_controller.set_direction.assert_called_with("port", "dir") + + @patch("builtins.open") + def test_configure_fail(self, mock_open): + mock_gpio_controller = MagicMock() + # The test should fail if the GPIO can't be exported + mock_gpio_controller.check_gpio_node.side_effect = [False, False] + with self.assertRaises(SystemExit): + GPIOSysFsController.configure_gpio( + mock_gpio_controller, "port", "dir" + ) + mock_gpio_controller.set_direction.assert_not_called() + + # The test should fail if the direction can't be set + mock_gpio_controller.check_gpio_node.side_effect = [True, True] + mock_gpio_controller.set_direction.side_effect = IOError + with self.assertRaises(IOError): + GPIOSysFsController.configure_gpio( + mock_gpio_controller, "port", "dir" + ) + + @patch("time.sleep", MagicMock()) + def test_loopback_test(self): + mock_gpio_controller = MagicMock() + mock_gpio_controller.TEST_STATES = [0, 1] + mock_gpio_controller.read_gpio.side_effect = ["1", "0", "0", "1"] + self.assertTrue( + GPIOSysFsController.loopback_test(mock_gpio_controller, "1", "34") + ) + # configure_gpio should be called twice, once for each port + self.assertEqual(mock_gpio_controller.configure_gpio.call_count, 2) + # set_gpio should be called twice, once for each state + self.assertEqual(mock_gpio_controller.set_gpio.call_count, 2) + + @patch("time.sleep", MagicMock()) + def test_loopback_test_fail(self): + mock_gpio_controller = MagicMock() + mock_gpio_controller.TEST_STATES = [0, 1] + mock_gpio_controller.read_gpio.side_effect = ["1", "0", "0", "0"] + self.assertFalse( + GPIOSysFsController.loopback_test(mock_gpio_controller, "1", "34") + ) + # configure_gpio should be called twice, once for each port + 
self.assertEqual(mock_gpio_controller.configure_gpio.call_count, 2) + # set_gpio should be called twice, once for each state + self.assertEqual(mock_gpio_controller.set_gpio.call_count, 2) + + @patch("gpio_loopback_test.GPIOSysFsController.run_test") + def test_main(self, mock_run_test): + mock_run_test.return_value = None + args = ( + ["script_name"] + + ["-oc", "0"] + + ["-ic", "1"] + + ["-po", "J1"] + + ["-pi", "J2"] + + ["-go", "1"] + + ["-gi", "2"] + ) + with patch("sys.argv", args): + self.assertEqual(main(), None) + mock_run_test.assert_called_once_with( + "0", "1", "J1", "J2", "1", "2" + ) + + # Test fails if run_test raises a SystemExit + mock_run_test.side_effect = SystemExit + with patch("sys.argv", args): + with self.assertRaises(SystemExit): + main() From 2a5589dce15d3030f2eb874efb9bf3eb70652f43 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Mon, 18 Mar 2024 17:17:45 +0100 Subject: [PATCH 092/108] New timeout helper module (New) (#1072) * New timeout helper module * Drop processes and Pools, use threads * Renamed timeout function and added docstrings to the decorator * Replace TimeoutError with SystemExit This was done because counterintuitively the timeout doesn't terminate the tracked task. This could be an issue if the user doesn't realize and uses it to launch a stress test or some other heavy task assuming the timeout would kill it --- .../checkbox_support/helpers/timeout.py | 73 ++++++++++++ .../checkbox_support/tests/test_timeout.py | 111 ++++++++++++++++++ 2 files changed, 184 insertions(+) create mode 100644 checkbox-support/checkbox_support/helpers/timeout.py create mode 100644 checkbox-support/checkbox_support/tests/test_timeout.py diff --git a/checkbox-support/checkbox_support/helpers/timeout.py b/checkbox-support/checkbox_support/helpers/timeout.py new file mode 100644 index 0000000000..2a4c29b758 --- /dev/null +++ b/checkbox-support/checkbox_support/helpers/timeout.py @@ -0,0 +1,73 @@ +# This file is part of Checkbox. 
+# +# Copyright 2024 Canonical Ltd. +# Written by: +# Massimimliano Girardi +# +# Checkbox is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, +# as published by the Free Software Foundation. +# +# Checkbox is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Checkbox. If not, see . +""" +checkbox_support.helpers.timeout +============================================= + +Utility class that provides functionalities connected to placing timeouts on +functions +""" +import threading + +from queue import Queue +from contextlib import wraps + + +def run_with_timeout(f, timeout_s, *args, **kwargs): + """ + Runs a function with the given args and kwargs. If the function doesn't + terminate within timeout_s seconds, this raises SystemExit because the + expiration of the timeout does not terminate the underlying task, therefore + the process should exit to reach that goal. + """ + result_queue = Queue() + exception_queue = Queue() + + def _f(*args, **kwargs): + try: + result_queue.put(f(*args, **kwargs)) + except BaseException as e: + exception_queue.put(e) + + thread = threading.Thread(target=_f, args=args, kwargs=kwargs, daemon=True) + thread.start() + thread.join(timeout_s) + + if thread.is_alive(): + raise SystemExit( + "Task unable to finish in {}s".format(timeout_s) + ) from TimeoutError + if not exception_queue.empty(): + raise exception_queue.get() + return result_queue.get() + + +def timeout(timeout_s): + """ + Lets the decorated function run for up to timeout_s seconds. 
If the + function doesn't terminate within the timeout, raises TimeoutError + """ + + def timeout_timeout_s(f): + @wraps(f) + def _f(*args, **kwargs): + return run_with_timeout(f, timeout_s, *args, **kwargs) + + return _f + + return timeout_timeout_s diff --git a/checkbox-support/checkbox_support/tests/test_timeout.py b/checkbox-support/checkbox_support/tests/test_timeout.py new file mode 100644 index 0000000000..4567c0a13c --- /dev/null +++ b/checkbox-support/checkbox_support/tests/test_timeout.py @@ -0,0 +1,111 @@ +# This file is part of Checkbox. +# +# Copyright 2024 Canonical Ltd. +# Written by: +# Massimimliano Girardi +# +# Checkbox is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, +# as published by the Free Software Foundation. +# +# Checkbox is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Checkbox. If not, see . 
+ +import time + +from unittest import TestCase + +from checkbox_support.helpers.timeout import run_with_timeout, timeout + + +class ClassSupport: + def __init__(self, work_time): + self.work_time = work_time + + def heavy_function(self): + time.sleep(self.work_time) + return "ClassSupport return value" + + +def heavy_function(time_s): + time.sleep(time_s) + return "ClassSupport return value" + + +def some_exception_raiser(): + raise ValueError("value error!") + + +def system_exit_raiser(): + raise SystemExit("abc") + + +def kwargs_args_support(first, second, third=3): + return (first, second, third) + + +class TestTimeoutExec(TestCase): + def test_class_field_timeouts(self): + some = ClassSupport(1) + with self.assertRaises(SystemExit): + run_with_timeout(some.heavy_function, 0) + + def test_class_field_ok_return(self): + some = ClassSupport(0) + self.assertEqual( + run_with_timeout(some.heavy_function, 10), "ClassSupport return value" + ) + + def test_function_timeouts(self): + with self.assertRaises(SystemExit): + run_with_timeout(heavy_function, 0, 10) + + def test_function_ok_return(self): + self.assertEqual( + run_with_timeout(heavy_function, 10, 0), "ClassSupport return value" + ) + + def test_function_exception_propagation(self): + with self.assertRaises(ValueError): + run_with_timeout(some_exception_raiser, 0) + + def test_function_systemexit_propagation(self): + with self.assertRaises(SystemExit): + system_exit_raiser() + + def test_function_args_kwargs_support(self): + self.assertEqual( + run_with_timeout( + kwargs_args_support, 1, "first", "second", third="third" + ), + ("first", "second", "third"), + ) + + def test_decorator_test_ok(self): + @timeout(1) + def f(first, second, third): + return (first, second, third) + + self.assertEqual(f(1, 2, 3), (1, 2, 3)) + + def test_decorator_test_fail(self): + @timeout(0) + def f(first, second, third): + time.sleep(100) + return (first, second, third) + + with self.assertRaises(SystemExit): + f(1, 2, 3) + + def 
test_decorator_exception(self): + @timeout(1) + def f(first, second, third): + raise ValueError("error with first") + + with self.assertRaises(ValueError): + f(1,2,3) From d8063c232f611e81c843d44d917ec12ab4ef117f Mon Sep 17 00:00:00 2001 From: hanhsuan <32028620+hanhsuan@users.noreply.github.com> Date: Tue, 19 Mar 2024 00:37:07 +0800 Subject: [PATCH 093/108] Change to new graphic test strategy (BugFix) (#586) * Changing gpu test strategy to prime/reverse-prime gpu offload without depending on index For Nvidia GPU, the prime/reverse prime offload is not supported before version 435.17. Therefore, This new strategy is only for 22.04+. For backward compatibility, this PR add new test plans for 22.04+ as follow: graphics-gpu-cert-full graphics-gpu-cert-automated graphics-gpu-cert-manual after-suspend-graphics-gpu-cert-full after-suspend-graphics-gpu-cert-automated after-suspend-graphics-gpu-cert-manual monitor-gpu-cert-full monitor-gpu-cert-automated monitor-gpu-cert-manual after-suspend-monitor-gpu-cert-full after-suspend-monitor-gpu-cert-automated after-suspend-monitor-gpu-cert-manual And add new python script "prime_offload_tester.py" to execute command with prime/reverse prime setting for new test jobs as follow: Auto test: graphics/{index}_auto_glxgears_{product_slug} graphics/{index}_auto_glxgears_fullscreen_{product_slug} Manual: graphics/{index}_valid_glxgears_{product_slug} graphics/{index}_valid_glxgears_fullscreen_{product_slug} * Add more unit test for graphics_card_resource.py and prime_offload_tester.py * Add one more unit test * move parse arguments to single function for unit testing * Fix flake8 error * 1. Refactory to be more like python 2. add extra method for avoid checking fail by 6.5 kernel bug * Fix flake8 error * add executable permission * 1. Move changes of job and test-plan to another PR 2. Bug of 6.5 kernel is released in proposed kernel 6.5.0.16 and have tested. Therefore, removing workaround. * 1. 
Move change of jobs and test plan to another PR 2. add more unit tests * Fix pci BDF format check error ref: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/arch/x86/pci/early.c?id=refs/tags/v3.12.7#n65 https://wiki.xenproject.org/wiki/Bus:Device.Function_(BDF)_Notation * Update providers/base/bin/prime_offload_tester.py Co-authored-by: kissiel * Update providers/base/bin/prime_offload_tester.py Co-authored-by: kissiel * Update providers/base/bin/prime_offload_tester.py Co-authored-by: kissiel * 1. move the get clients from check_offload to get_client 2. fix docstring error 3. change default to 20s and the logic in the check_offload 4. change RuntimeError to SystemExit --------- Co-authored-by: kissiel --- providers/base/bin/prime_offload_tester.py | 302 ++++++++++ .../base/tests/test_prime_offload_tester.py | 519 ++++++++++++++++++ .../resource/bin/graphics_card_resource.py | 3 + .../tests/test_graphics_card_resource.py | 35 ++ 4 files changed, 859 insertions(+) create mode 100755 providers/base/bin/prime_offload_tester.py create mode 100755 providers/base/tests/test_prime_offload_tester.py create mode 100755 providers/resource/tests/test_graphics_card_resource.py diff --git a/providers/base/bin/prime_offload_tester.py b/providers/base/bin/prime_offload_tester.py new file mode 100755 index 0000000000..0aecf2a233 --- /dev/null +++ b/providers/base/bin/prime_offload_tester.py @@ -0,0 +1,302 @@ +#!/usr/bin/env python3 +# This file is part of Checkbox. +# +# Copyright 2023 Canonical Ltd. +# Written by: +# Hanhsuan Lee +# +# Checkbox is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, +# as published by the Free Software Foundation. +# +# Checkbox is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Checkbox. If not, see . + +import sys +import threading +import subprocess +import time +import re +import json +import argparse +import logging +import os + + +class PrimeOffloader: + """ + A class used to execute process to specific GPU. + Have to run this as root. + + :attr logger: console logger + :type logger: RootLogger + + :attr check_result: + store the result of checking offloading is ok or not. + :type check_result: bool + """ + + logger = logging.getLogger() + check_result = False + + def find_card_id(self, pci_name: str) -> str: + """ + use pci name to find card id under /sys/kernel/debug/dri + + :param pci_name: pci device name in NNNN:NN:NN.N format + + :returns: card id + """ + pci_name_format = "[0-9]{4}:[0-9,a-f]{2}:[0-9,a-f]{2}.[0-9]" + if not re.match(pci_name_format, pci_name.lower()): + raise SystemExit("pci name format error") + + try: + cmd = ["grep", + "-lr", + "--include=name", + pci_name, + "/sys/kernel/debug/dri"] + + card_path = subprocess.check_output(cmd, + universal_newlines=True) + return card_path.split('/')[5] + except IndexError as e: + raise SystemExit("return value format error {}".format(repr(e))) + except subprocess.CalledProcessError as e: + raise SystemExit("run command failed {}".format(repr(e))) + + def find_card_name(self, pci_name: str) -> str: + """ + use pci name to find card name by lshw + + :param pci_name: pci device name in NNNN:NN:NN.N format + + :returns: card name + """ + cmd = ["lshw", "-c", "display", "-json"] + try: + card_infos = subprocess.check_output(cmd, + universal_newlines=True) + infos = json.loads(card_infos) + for info in infos: + if pci_name in info['businfo']: + return info['product'] + raise SystemExit("Card name not found") + except (KeyError, TypeError, json.decoder.JSONDecodeError) as e: + raise SystemExit("return value format error {}".format(e)) + except subprocess.CalledProcessError as e: + raise 
SystemExit("run command failed {}".format(repr(e))) + + def get_clients(self, card_id: str) -> str: + """ + Use to get clients that running on specific GPU + by reading debugfs. + + .. note:: + While setting prime offload environment such as DRI_PRIME, + the process will be listed under kernel debug interface. + The location of kernel debug interface is + /sys/kernel/debug/dri/, + and the process could be found in + /sys/kernel/debug/dri//clients + + :param cmd: command that running under prime offload + """ + read_clients_cmd = ["cat", + "/sys/kernel/debug/dri/{}/clients" + .format(card_id)] + try: + return subprocess.check_output(read_clients_cmd, + universal_newlines=True) + except subprocess.CalledProcessError: + self.logger.info("Couldn't get clients on specific GPU{}" + .format(card_id)) + + def check_offload(self, cmd: list, card_id: str, + card_name: str, timeout: str): + """ + Use to check provided command is executed on specific GPU. + + :param cmd: command that running under prime offload + + :param card_id: card id of dri device + + :param card_name: card name of dri device + + :param timeout: timeout for offloaded command + """ + delay = timeout / 10 + + deadline = time.time() + timeout + + while time.time() < deadline: + time.sleep(delay) + clients = self.get_clients(card_id) + if clients and cmd[0] in clients: + self.logger.info("Checking success:") + self.logger.info(" Offload process:[{}]".format(cmd)) + self.logger.info(" Card ID:[{}]".format(card_id)) + self.logger.info(" Device Name:[{}]".format(card_name)) + return + self.logger.info("Checking fail:") + self.logger.info(" Couldn't find process [{}]".format(cmd)) + self.check_result = True + + def check_nv_offload_env(self): + """ + prime offload of nvidia driver is limited. + Only on-demand mode is supported. 
+ """ + # nvidia-smi ship with NVIDIA GPU display drivers on Linux + # https://developer.nvidia.com/nvidia-system-management-interface + # check prime-select to make sure the nv driver is included. + # If there is no nv driver, prime offload is fine for other drivers. + try: + if "on-demand" not in subprocess.check_output( + ["prime-select", "query"], universal_newlines=True): + raise SystemExit("System isn't on-demand mode") + + # prime offload couldn't running on nvlink active or inactive + # Therefore, only return empty string is supported environment. + nvlink = subprocess.check_output(["nvidia-smi", "nvlink", "-s"], + universal_newlines=True) + if nvlink: + if 'error' in nvlink.lower(): + raise SystemExit("nvidia driver error") + raise SystemExit("NVLINK detected") + except FileNotFoundError: + self.logger.info( + "No prime-select, it should be ok to run prime offload") + + def run_offload_cmd(self, cmd: str, pci_name: str, + driver: str, timeout: int): + """ + run offload command and check it runs on correct GPU + + :param cmd: command that running under prime offload + + :param pci_name: pci device name in NNNN:NN:NN.N format + + :param driver: GPU driver, such as i915, amdgpu, nvidia + + :param timeout: timeout for offloaded command + """ + card_id = self.find_card_id(pci_name) + card_name = self.find_card_name(pci_name) + + # run offload command in other process + dri_pci_name_format = re.sub("[:.]", "_", pci_name) + + if "timeout" in cmd: + raise SystemExit("Put timeout in command isn't allowed") + + cmd = cmd.split() + if timeout > 0: + offload_cmd = ["timeout", str(timeout)] + cmd + else: + # if timeout <=0 will make check_offload failed. 
+ # Set the timeout to the default value + log_str = ("Timeout {}s is invalid," + " remove the timeout setting" + " and change check_offload to run 20s".format(timeout)) + self.logger.info(log_str) + timeout = 20 + offload_cmd = cmd + + env = os.environ.copy() + if driver in ('nvidia', 'pcieport'): + offload_env = {"__NV_PRIME_RENDER_OFFLOAD": "1", + "__GLX_VENDOR_LIBRARY_NAME": "nvidia"} + else: + offload_env = {"DRI_PRIME": "pci-{}".format(dri_pci_name_format)} + + env.update(offload_env) + self.logger.info("prime offload env: {}".format(offload_env)) + + # if nv driver under nvidia mode, prime/reverse prime couldn't work. + self.check_nv_offload_env() + + # use other thread to check offload is correctly or not + check_thread = threading.Thread(target=self.check_offload, + args=(cmd, card_id, + card_name, + timeout)) + check_thread.start() + try: + with subprocess.Popen(offload_cmd, env=env, + stdout=subprocess.PIPE, + universal_newlines=True) as offload: + + self.logger.info("offload command:[{}]".format(offload_cmd)) + + # redirect offload command output real time + while offload.poll() is None: + line = offload.stdout.readline().strip() + self.logger.info(line) + check_thread.join() + if self.check_result: + raise SystemExit("offload to specific GPU failed") + except subprocess.CalledProcessError as e: + raise SystemExit("run offload command failed {}".format(repr(e))) + + def parse_args(self, args=sys.argv[1:]): + """ + command line arguments parsing + + :param args: arguments from sys + :type args: sys.argv + """ + parser = argparse.ArgumentParser( + prog="Prime offload tester", + description="Test prime offload feature", + ) + + parser.add_argument( + "-c", "--command", type=str, default='glxgears', + help='command to offload to specific GPU (default: %(default)s)' + ) + parser.add_argument( + "-p", "--pci", type=str, default='0000:00:02.0', + help='pci name in NNNN:NN:NN.N format (default: %(default)s)' + ) + parser.add_argument( + "-d", "--driver", 
type=str, default='i915', + help='Type of GPU driver (default: %(default)s)' + ) + parser.add_argument( + "-t", "--timeout", type=int, default=20, + help='executing command duration in second (default: %(default)s).' + ) + return parser.parse_args(args) + + def main(self): + args = self.parse_args() + + # create self.logger.formatter + log_formatter = logging.Formatter(fmt='%(message)s') + + # create logger + self.logger.setLevel(logging.INFO) + + # create console handler + console_handler = logging.StreamHandler() + console_handler.setFormatter(log_formatter) + + # Add console handler to logger + self.logger.addHandler(console_handler) + + # run_offload_cmd("glxgears", "0000:00:02.0", "i915", 0) + self.run_offload_cmd(args.command, + args.pci, + args.driver, + args.timeout) + + +if __name__ == "__main__": + PrimeOffloader().main() diff --git a/providers/base/tests/test_prime_offload_tester.py b/providers/base/tests/test_prime_offload_tester.py new file mode 100755 index 0000000000..3cba0f007c --- /dev/null +++ b/providers/base/tests/test_prime_offload_tester.py @@ -0,0 +1,519 @@ +#!/usr/bin/env python3 +# Copyright 2023 Canonical Ltd. +# Written by: +# Hanhsuan Lee +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, +# as published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import unittest +from unittest.mock import patch, MagicMock + +from prime_offload_tester import * + + +class FindCardIdTests(unittest.TestCase): + """ + This function should extract card id from debug file system by pci name + (pci bus information) + """ + + @patch("subprocess.check_output") + def test_pci_name_format_check(self, mock_check): + po = PrimeOffloader() + # correct format + mock_check.return_value = "/sys/kernel/debug/dri/0/name" + self.assertEqual(po.find_card_id("0000:00:00.0"), "0") + mock_check.assert_called_with(["grep", + "-lr", + "--include=name", + "0000:00:00.0", + "/sys/kernel/debug/dri"], + universal_newlines=True) + # should work with hex vaule + self.assertEqual(po.find_card_id("0000:c6:F0.0"), "0") + + # error format - with alphabet + with self.assertRaises(SystemExit): + po.find_card_id("000r:00:00.0") + + # error format - digital position error + with self.assertRaises(SystemExit): + po.find_card_id("0000:00:000.0") + + @patch("subprocess.check_output") + def test_id_not_found(self, mock_check): + po = PrimeOffloader() + # empty string + mock_check.return_value = "" + with self.assertRaises(SystemExit): + po.find_card_id("0000:00:00.0") + mock_check.assert_called_with(["grep", + "-lr", + "--include=name", + "0000:00:00.0", + "/sys/kernel/debug/dri"], + universal_newlines=True) + + # subprocess error + mock_check.side_effect = subprocess.CalledProcessError(-1, "test") + with self.assertRaises(SystemExit): + po.find_card_id("0000:00:00.0") + mock_check.assert_called_with(["grep", + "-lr", + "--include=name", + "0000:00:00.0", + "/sys/kernel/debug/dri"], + universal_newlines=True) + + +class FindCardNameTests(unittest.TestCase): + """ + This function should extract card name from lshw by pci name + (pci bus information) + """ + + lshw_output = """ + [ + { + "id" : "display", + "class" : "display", + "claimed" : true, + "handle" : "PCI:0000:00:02.0", + "description" : "VGA compatible controller", + "product" : "TigerLake-LP GT2 [Iris Xe 
Graphics]", + "vendor" : "Intel Corporation", + "physid" : "2", + "businfo" : "pci@0000:00:02.0", + "logicalname" : "/dev/fb0", + "version" : "01", + "width" : 64, + "clock" : 33000000, + "configuration" : { + "depth" : "32", + "driver" : "i915", + "latency" : "0", + "mode" : "1920x1080", + "resolution" : "1920,1080", + "visual" : "truecolor", + "xres" : "1920", + "yres" : "1080" + }, + "capabilities" : { + "pciexpress" : "PCI Express", + "msi" : "Message Signalled Interrupts", + "pm" : "Power Management", + "vga_controller" : true, + "bus_master" : "bus mastering", + "cap_list" : "PCI capabilities listing", + "rom" : "extension ROM", + "fb" : "framebuffer" + } + } + ]""" + + lshw_output_err = """ + [ + { + "id" : "display", + "class" : "display", + "claimed" : true, + "handle" : "PCI:0000:00:02.0", + "description" : "VGA compatible controller", + "product" : "TigerLake-LP GT2 [Iris Xe Graphics]", + "vendor" : "Intel Corporation", + "physid" : "2", + "logicalname" : "/dev/fb0", + "version" : "01", + "width" : 64, + "clock" : 33000000, + "configuration" : { + "depth" : "32", + "driver" : "i915", + "latency" : "0", + "mode" : "1920x1080", + "resolution" : "1920,1080", + "visual" : "truecolor", + "xres" : "1920", + "yres" : "1080" + } + } + ]""" + + @patch("subprocess.check_output") + def test_name_found_check(self, mock_check): + po = PrimeOffloader() + mock_check.return_value = self.lshw_output + self.assertEqual(po.find_card_name("0000:00:02.0"), + "TigerLake-LP GT2 [Iris Xe Graphics]") + mock_check.assert_called_with(["lshw", + "-c", + "display", + "-json"], + universal_newlines=True) + + @patch("subprocess.check_output") + def test_name_not_found_check(self, mock_check): + po = PrimeOffloader() + # pci_name error + mock_check.return_value = self.lshw_output + with self.assertRaises(SystemExit): + po.find_card_name("0000:00:00.0") + mock_check.assert_called_with(["lshw", + "-c", + "display", + "-json"], + universal_newlines=True) + + # no businfo in lshw output + 
mock_check.return_value = self.lshw_output_err + with self.assertRaises(SystemExit): + po.find_card_name("0000:00:00.0") + mock_check.assert_called_with(["lshw", + "-c", + "display", + "-json"], + universal_newlines=True) + + # empty string + mock_check.return_value = "" + with self.assertRaises(SystemExit): + po.find_card_name("0000:00:00.0") + mock_check.assert_called_with(["lshw", + "-c", + "display", + "-json"], + universal_newlines=True) + + # None + mock_check.return_value = None + with self.assertRaises(SystemExit): + po.find_card_name("0000:00:00.0") + mock_check.assert_called_with(["lshw", + "-c", + "display", + "-json"], + universal_newlines=True) + + # subprocess error + mock_check.side_effect = subprocess.CalledProcessError(-1, "test") + with self.assertRaises(SystemExit): + po.find_card_name("0000:00:00.0") + mock_check.assert_called_with(["lshw", + "-c", + "display", + "-json"], + universal_newlines=True) + + @patch("subprocess.check_output") + def test_get_clients(self, mock_check): + po = PrimeOffloader() + mock_check.return_value = "echo" + self.assertEqual(po.get_clients(0), "echo") + mock_check.assert_called_with(["cat", + "/sys/kernel/debug/dri/0/clients" + ], + universal_newlines=True) + + # subprocess failed + mock_check.side_effect = subprocess.CalledProcessError(-1, "fail") + with self.assertRaises(subprocess.CalledProcessError): + po.check_nv_offload_env() + self.assertEqual(po.get_clients(0), None) + + +class CheckOffloadTests(unittest.TestCase): + """ + This function will check process is showed in specific dri devide + debug file system + """ + + @patch('time.sleep', return_value=None) + @patch("prime_offload_tester.PrimeOffloader.get_clients") + def test_offload_succ_check(self, mock_client, mock_sleep): + cmd = ["echo"] + mock_client.return_value = cmd + po = PrimeOffloader() + self.assertEqual(po.check_offload(cmd, "card_id", "card_name", 1), + None) + self.assertEqual(po.check_result, False) + + @patch('time.sleep', 
return_value=None) + @patch("prime_offload_tester.PrimeOffloader.get_clients") + def test_offload_fail_check(self, mock_client, mock_sleep): + cmd = ["echo"] + # get_clients return string that doesn't include cmd + mock_client.return_value = "" + po = PrimeOffloader() + self.assertEqual(po.check_offload(cmd, "card_id", "card_name", 1), + None) + self.assertEqual(po.check_result, True) + + # get_clients return None by CalledProcessError + mock_client.return_value = None + po = PrimeOffloader() + self.assertEqual(po.check_offload(cmd, "card_id", "card_name", 1), + None) + self.assertEqual(po.check_result, True) + + +class CheckNvOffloadEnvTests(unittest.TestCase): + """ + This function will check this system could use prime offload or not. + Only on-demand mode is supported for NV driver. + """ + + @patch("subprocess.check_output") + def test_on_demand_check(self, mock_check): + po = PrimeOffloader() + # with nv driver, not on-demand mode + mock_check.return_value = "prime-select" + with self.assertRaises(SystemExit): + po.check_nv_offload_env() + mock_check.assert_called_with(["prime-select", + "query"], + universal_newlines=True) + + @patch("subprocess.check_output") + def test_nvlink_check(self, mock_check): + po = PrimeOffloader() + # with nv driver, on-demand mode. 
This might be NVLINK environment + mock_check.return_value = "prime-select on-demand" + with self.assertRaises(SystemExit): + po.check_nv_offload_env() + mock_check.assert_called_with(["nvidia-smi", + "nvlink", + "-s"], + universal_newlines=True) + + # with nv driver, on-demand mode, nv driver error + mock_check.side_effect = ["on-demand", "error"] + with self.assertRaises(SystemExit): + po.check_nv_offload_env() + mock_check.assert_called_with(["nvidia-smi", + "nvlink", + "-s"], + universal_newlines=True) + + # with nv driver, on-demand mode, no nv driver error + mock_check.side_effect = ["on-demand", ""] + self.assertEqual(None, po.check_nv_offload_env()) + mock_check.assert_called_with(["nvidia-smi", + "nvlink", + "-s"], + universal_newlines=True) + + # No prime-select + mock_check.side_effect = FileNotFoundError + self.assertEqual(po.check_nv_offload_env(), None) + + +class RunOffloadCmdTests(unittest.TestCase): + """ + This function is the entry point to run the command with prime offload, + if the environment is supported. 
+ """ + + def test_condition_check(self): + po = PrimeOffloader() + # no card id + po.find_card_id = MagicMock(side_effect=SystemExit) + with self.assertRaises(SystemExit): + po.run_offload_cmd("echo", "0000:00:00.0", "driver", 0) + + # no card name + po.find_card_id = MagicMock(return_value="0") + po.find_card_name = MagicMock(side_effect=SystemExit) + with self.assertRaises(SystemExit): + po.run_offload_cmd("echo", "0000:00:00.0", "driver", 0) + + # timeout in command + po.find_card_id = MagicMock(return_value="0") + po.find_card_name = MagicMock(return_value="Card") + with self.assertRaises(SystemExit): + po.run_offload_cmd("timeout 10 echo", + "0000:00:00.0", + "driver", + 0) + + # check_nv_offload_env failed + po.find_card_id = MagicMock(return_value="0") + po.find_card_name = MagicMock(return_value="Card") + po.check_nv_offload_env = MagicMock(side_effect=SystemExit) + with self.assertRaises(SystemExit): + po.run_offload_cmd("echo", + "0000:00:00.0", + "driver", + 0) + + @patch('time.sleep', return_value=None) + @patch("subprocess.Popen") + def test_offload_cmd_check(self, mock_open, mock_sleep): + nv_env = {'__NV_PRIME_RENDER_OFFLOAD': '1', + '__GLX_VENDOR_LIBRARY_NAME': 'nvidia'} + o_env = {'DRI_PRIME': 'pci-0000_00_00_0'} + + # non NV driver + po = PrimeOffloader() + po.find_card_id = MagicMock(return_value="0") + po.find_card_name = MagicMock(return_value="Intel") + po.check_nv_offload_env = MagicMock(return_value=None) + po.check_offload = MagicMock(return_value="") + os.environ.copy = MagicMock(return_value={}) + po.run_offload_cmd("echo", "0000:00:00.0", "xxx", 0) + # check run_offload_cmd executing correct command + mock_open.assert_called_with(["echo"], + env=o_env, + stdout=subprocess.PIPE, + universal_newlines=True) + # check check_offload function get correct args + po.check_offload.assert_called_with(["echo"], '0', 'Intel', 20) + + # non NV driver with timeout setting + po.find_card_id = MagicMock(return_value="0") + po.find_card_name = 
MagicMock(return_value="Intel") + po.check_nv_offload_env = MagicMock(return_value=None) + po.check_offload = MagicMock(return_value="") + os.environ.copy = MagicMock(return_value={}) + po.run_offload_cmd("echo", "0000:00:00.0", "xxx", 1) + # check run_offload_cmd executing correct command + mock_open.assert_called_with(["timeout", "1", "echo"], + env=o_env, + stdout=subprocess.PIPE, + universal_newlines=True) + # check check_offload function get correct args + po.check_offload.assert_called_with(["echo"], '0', 'Intel', 1) + + # NV driver + po.find_card_id = MagicMock(return_value="0") + po.find_card_name = MagicMock(return_value="NV") + po.check_nv_offload_env = MagicMock(return_value=None) + po.check_offload = MagicMock(return_value="") + os.environ.copy = MagicMock(return_value={}) + po.run_offload_cmd("echo", "0000:00:00.0", "nvidia", 1) + # check run_offload_cmd executing correct command + mock_open.assert_called_with(["timeout", "1", "echo"], + env=nv_env, + stdout=subprocess.PIPE, + universal_newlines=True) + # check check_offload function get correct args + po.check_offload.assert_called_with(["echo"], '0', 'NV', 1) + + # subprocess error + po.find_card_id = MagicMock(return_value="0") + po.find_card_name = MagicMock(return_value="NV") + po.check_nv_offload_env = MagicMock(return_value=None) + po.check_offload = MagicMock(return_value="") + os.environ.copy = MagicMock(return_value={}) + mock_open.side_effect = subprocess.CalledProcessError(-1, "test") + with self.assertRaises(SystemExit): + po.run_offload_cmd("echo", "0000:00:00.0", "nvidia", 1) + # check run_offload_cmd executing correct command + mock_open.assert_called_with(["timeout", "1", "echo"], + env=nv_env, + stdout=subprocess.PIPE, + universal_newlines=True) + # check check_offload function get correct args + po.check_offload.assert_called_with(["echo"], '0', 'NV', 1) + + # check offload failed + po.find_card_id = MagicMock(return_value="0") + po.find_card_name = MagicMock(return_value="NV") + 
po.check_nv_offload_env = MagicMock(return_value=None) + po.check_offload = MagicMock(return_value="") + os.environ.copy = MagicMock(return_value={}) + po.check_result = True + mock_open.side_effect = None + with self.assertRaises(SystemExit): + po.run_offload_cmd("echo", "0000:00:00.0", "nvidia", 1) + # check run_offload_cmd executing correct command + mock_open.assert_called_with(["timeout", "1", "echo"], + env=nv_env, + stdout=subprocess.PIPE, + universal_newlines=True) + # check check_offload function get correct args + po.check_offload.assert_called_with(["echo"], '0', 'NV', 1) + + +class ParseArgsTests(unittest.TestCase): + def test_success(self): + po = PrimeOffloader() + # no arguments, load default + args = [] + rv = po.parse_args(args) + self.assertEqual(rv.command, "glxgears") + self.assertEqual(rv.pci, "0000:00:02.0") + self.assertEqual(rv.driver, "i915") + self.assertEqual(rv.timeout, 20) + + # change command + args = ["-c", "glxgears -fullscreen"] + rv = po.parse_args(args) + self.assertEqual(rv.command, "glxgears -fullscreen") + self.assertEqual(rv.pci, "0000:00:02.0") + self.assertEqual(rv.driver, "i915") + self.assertEqual(rv.timeout, 20) + + # change pci + args = ["-p", "0000:00:01.0"] + rv = po.parse_args(args) + self.assertEqual(rv.command, "glxgears") + self.assertEqual(rv.pci, "0000:00:01.0") + self.assertEqual(rv.driver, "i915") + self.assertEqual(rv.timeout, 20) + + # change driver + args = ["-d", "nvidia"] + rv = po.parse_args(args) + self.assertEqual(rv.command, "glxgears") + self.assertEqual(rv.pci, "0000:00:02.0") + self.assertEqual(rv.driver, "nvidia") + self.assertEqual(rv.timeout, 20) + + # change timeout + args = ["-t", "5"] + rv = po.parse_args(args) + self.assertEqual(rv.command, "glxgears") + self.assertEqual(rv.pci, "0000:00:02.0") + self.assertEqual(rv.driver, "i915") + self.assertEqual(rv.timeout, 5) + + # change all + args = ["-c", "glxgears -fullscreen", + "-p", "0000:00:01.0", + "-d", "nvidia", + "-t", "5"] + rv = 
po.parse_args(args) + self.assertEqual(rv.command, "glxgears -fullscreen") + self.assertEqual(rv.pci, "0000:00:01.0") + self.assertEqual(rv.driver, "nvidia") + self.assertEqual(rv.timeout, 5) + + +class MainTests(unittest.TestCase): + @patch("prime_offload_tester.PrimeOffloader.parse_args") + @patch("prime_offload_tester.PrimeOffloader.run_offload_cmd") + def test_run_offload_cmd_succ(self, mock_run_offload, mock_parse_args): + self.assertEqual(PrimeOffloader().main(), None) + + @patch("prime_offload_tester.PrimeOffloader.parse_args") + @patch("prime_offload_tester.PrimeOffloader.run_offload_cmd") + def test_run_offload_cmd_fail(self, mock_run_offload, mock_parse_args): + po = PrimeOffloader() + mock_run_offload.side_effect = SystemExit + with self.assertRaises(SystemExit) as cm: + po.main() + self.assertNotEqual(cm.exception.code, 0) + + +if __name__ == '__main__': + unittest.main() diff --git a/providers/resource/bin/graphics_card_resource.py b/providers/resource/bin/graphics_card_resource.py index 9a0c2435cc..78e5e5d859 100755 --- a/providers/resource/bin/graphics_card_resource.py +++ b/providers/resource/bin/graphics_card_resource.py @@ -105,6 +105,9 @@ def udev_devices(lines): key, value = line.split(":", 1) key = key.strip() record[key] = value.strip() + if key == 'path': + v = value.strip().split('/') + record['pci_device_name'] = v[len(v) - 1] except ValueError: # If a line has no colon it's suspicious, maybe a # bogus input file. Let's discard it. diff --git a/providers/resource/tests/test_graphics_card_resource.py b/providers/resource/tests/test_graphics_card_resource.py new file mode 100755 index 0000000000..4231a96b11 --- /dev/null +++ b/providers/resource/tests/test_graphics_card_resource.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# +# This file is part of Checkbox. +# +# Copyright 2023 Canonical Ltd. 
+# Authors: Hanhsuan Lee +# +# Checkbox is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, +# as published by the Free Software Foundation. +# +# Checkbox is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Checkbox. If not, see . + +import unittest +from graphics_card_resource import * + + +class UdevDevicesTests(unittest.TestCase): + record_line = ["path: /devices/pci0000:00/0000:00:02.1/0000:01:00.0"] + + def test_success(self): + record = udev_devices(self.record_line) + record_list = list(record) + self.assertEqual(len(record_list), 1) + self.assertEqual(record_list[0]['pci_device_name'], "0000:01:00.0") + + +if __name__ == '__main__': + unittest.main() From 9f063815559df97beae933dfffd81c858253c86a Mon Sep 17 00:00:00 2001 From: LiaoU3 <58060146+LiaoU3@users.noreply.github.com> Date: Tue, 19 Mar 2024 03:22:18 +0800 Subject: [PATCH 094/108] Add requires for bluetooth/keyboard and bluetooth/keyboard-manual (Bugfix) (#1073) Add requires for bluetooth/keyboard and bluetooth/keyboard-manual --- providers/base/units/bluetooth/jobs.pxu | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/providers/base/units/bluetooth/jobs.pxu b/providers/base/units/bluetooth/jobs.pxu index 8570c1386d..9ceea173e4 100644 --- a/providers/base/units/bluetooth/jobs.pxu +++ b/providers/base/units/bluetooth/jobs.pxu @@ -357,6 +357,8 @@ requires: device.category == 'BLUETOOTH' estimated_duration: 2.0 id: bluetooth/keyboard +imports: from com.canonical.plainbox import manifest +requires: manifest.has_bt_adapter == 'True' _summary: Bluetooth keyboard works _purpose: Check if bluetooth keyboard works @@ -370,6 +372,8 @@ category_id: 
com.canonical.plainbox::bluetooth estimated_duration: 1m id: bluetooth/keyboard-manual +imports: from com.canonical.plainbox import manifest +requires: manifest.has_bt_adapter == 'True' _summary: Bluetooth keyboard manual test _purpose: Check bluetooth input device works From 3991a48b346f9efa0d04ce0df8d56f9ede197025 Mon Sep 17 00:00:00 2001 From: kissiel Date: Mon, 18 Mar 2024 21:31:44 +0100 Subject: [PATCH 095/108] Add tiny bit of tests to the storage tests (New) (#1004) * add tiny bit of tests to the storage tests * add psutil to the base prov. tox.ini * add psutil also for py36 --- providers/base/tests/test_storage_test.py | 30 +++++++++++++++++++++++ providers/base/tox.ini | 2 ++ 2 files changed, 32 insertions(+) create mode 100644 providers/base/tests/test_storage_test.py diff --git a/providers/base/tests/test_storage_test.py b/providers/base/tests/test_storage_test.py new file mode 100644 index 0000000000..8c80fe5a48 --- /dev/null +++ b/providers/base/tests/test_storage_test.py @@ -0,0 +1,30 @@ +import unittest +from unittest.mock import patch, MagicMock + + +from storage_test import mountpoint + +class TestMountpoint(unittest.TestCase): + @patch("psutil.disk_partitions") + def test_mountpoint_nominal(self, mock_disk_partitions): + + sdiskpart = MagicMock() + sdiskpart.device = '/dev/sda1' + sdiskpart.mountpoint = '/' + mock_disk_partitions.return_value = [sdiskpart] + self.assertEqual(mountpoint("/dev/sda1"), "/") + + @patch("psutil.disk_partitions") + def test_mountpoint_nominal_multiple(self, mock_disk_partitions): + + mock_disk_partitions.return_value = [ + MagicMock(device='/dev/sda1', mountpoint='/'), + MagicMock(device='/dev/sda2', mountpoint='/boot') + ] + self.assertEqual(mountpoint("/dev/sda2"), "/boot") + + + @patch("psutil.disk_partitions") + def test_mountpoint_empty(self, mock_disk_partitions): + mock_disk_partitions.return_value = [] + self.assertEqual(mountpoint('/dev/sda1'), None) diff --git a/providers/base/tox.ini b/providers/base/tox.ini 
index 3484b56759..8446902b5e 100644 --- a/providers/base/tox.ini +++ b/providers/base/tox.ini @@ -27,6 +27,7 @@ deps = MarkupSafe == 0.23 natsort == 4.0.3 opencv_python == 4.4.0.42 + psutil == 5.9.5 requests == 2.9.1 tqdm == 4.19.5 urwid == 1.3.1 @@ -48,6 +49,7 @@ deps = MarkupSafe == 1.1.0 natsort == 4.0.3 opencv_python == 4.4.0.42 + psutil == 5.9.5 requests == 2.18.4 tqdm == 4.19.5 urwid == 2.0.1 From ade53692b2e75f88387ebbd5e6526a4fc4e75ed2 Mon Sep 17 00:00:00 2001 From: kissiel Date: Tue, 19 Mar 2024 09:21:01 +0100 Subject: [PATCH 096/108] Update definitions in base/audio jobs using an AI tool (BugFix) (#1083) - Update old descriptions into the purpose/steps/verification - Fix grammar and typo - Add summary for jobs without a summary --- providers/base/units/audio/jobs.pxu | 301 ++++++++++++----------- providers/base/units/audio/manifest.pxu | 3 + providers/base/units/audio/test-plan.pxu | 1 + 3 files changed, 161 insertions(+), 144 deletions(-) diff --git a/providers/base/units/audio/jobs.pxu b/providers/base/units/audio/jobs.pxu index da9e46929e..c0061b2163 100644 --- a/providers/base/units/audio/jobs.pxu +++ b/providers/base/units/audio/jobs.pxu @@ -6,7 +6,8 @@ requires: device.category == 'AUDIO' package.name == 'alsa-base' command: cat /proc/asound/cards -_description: Test to detect audio devices +_purpose: Test to detect audio devices +_summary: Check if audio devices can be detected. plugin: shell category_id: com.canonical.plainbox::audio @@ -27,7 +28,8 @@ command: fi done exit $fail -_description: Valid sof firmware signature +_purpose: Validate SOF firmware signature. +_summary: Ensure SOF firmware signature is valid. plugin: user-interact-verify category_id: com.canonical.plainbox::audio @@ -54,15 +56,16 @@ command: fi exit $EXIT_CODE _description: - PURPOSE: - This test will check that internal speakers work correctly - STEPS: - 1. 
Make sure that no external speakers or headphones are connected - When testing a desktop, you can skip this test if there is no - internal speaker, we will test the external output later - 2. Commence the test to play a brief tone on your audio device - VERIFICATION: - Did you hear a tone? +_purpose: + This test will check that internal speakers work correctly +_steps: + 1. Make sure that no external speakers or headphones are connected + When testing a desktop, you can skip this test if there is no + internal speaker, we will test the external output later + 2. Commence the test to play a brief tone on your audio device +_verification: + Did you hear a tone? +_summary: Ensure the internal speakers are working by playing a tone. unit: template template-resource: graphics_card @@ -275,11 +278,13 @@ command: _purpose: HDMI audio via USB Type-C port interface verification _steps: - 1. Plug an external HDMI device with sound on a USB Type-C port using a "USB Typce-C to HDMI" adapter (Use only one HDMI/DisplayPort/Thunderbolt interface at a time for this test) - 2. Commence the test + 1. Plug an external HDMI device with sound into a USB Type-C port using a "USB Type-C to HDMI" adapter (Use only one HDMI/DisplayPort/Thunderbolt interface at a time for this test) + 2. Begin the test _verification: Did you hear the sound from the HDMI device? - +_summary: + Verify HDMI audio playback through the USB Type-C port using a "USB Type-C to HDMI" adapter and confirm sound output. + plugin: user-interact-verify category_id: com.canonical.plainbox::audio flags: also-after-suspend @@ -305,13 +310,14 @@ command: fi exit $EXIT_CODE _description: - PURPOSE: - This test will check that headphones connector works correctly - STEPS: - 1. Connect a pair of headphones to your audio device - 2. 
Commence the test to play a sound to your audio device - VERIFICATION: - Did you hear a sound through the headphones and did the sound play without any distortion, clicks or other strange noises from your headphones? +_purpose: + This test will check that the headphones connector works correctly. +_steps: + 1. Connect a pair of headphones to your audio device. + 2. Commence the test to play a sound through your audio device. +_verification: + Did you hear a sound through the headphones, and did the sound play without any distortion, clicks, or other strange noises from your headphones? +_summary: Verify headphone connectivity and audio playback quality. plugin: user-interact-verify category_id: com.canonical.plainbox::audio @@ -338,14 +344,15 @@ command: fi exit $EXIT_CODE _description: - PURPOSE: - This test will check that recording sound using the onboard microphone works correctly - STEPS: - 1. Disconnect any external microphones that you have plugged in - 2. Click "Test", then speak into your internal microphone - 3. After a few seconds, your speech will be played back to you. - VERIFICATION: - Did you hear your speech played back? +_purpose: + This test will check that recording sound using the onboard microphone works correctly +_steps: + 1. Disconnect any external microphones that you have plugged in + 2. Click "Test", then speak into your internal microphone + 3. After a few seconds, your speech will be played back to you. +_verification: + Did you hear your speech played back? +_summary: Test internal microphone recording and playback functionality. plugin: user-interact-verify category_id: com.canonical.plainbox::audio @@ -371,15 +378,15 @@ command: audio_settings.py restore --file="$PLAINBOX_SESSION_SHARE"/pulseaudio_settings fi exit $EXIT_CODE -_description: - PURPOSE: +_purpose: This test will check that recording sound using an external microphone works correctly - STEPS: +_steps: 1. Connect a microphone to your microphone port 2. 
Click "Test", then speak into the external microphone 3. After a few seconds, your speech will be played back to you - VERIFICATION: +_verification: Did you hear your speech played back? +_summary: Verify external microphone sound recording and playback. plugin: user-interact-verify category_id: com.canonical.plainbox::audio @@ -402,15 +409,15 @@ command: audio_settings.py restore --file="$PLAINBOX_SESSION_SHARE"/pulseaudio_settings fi exit $EXIT_CODE -_description: - PURPOSE: - This test will check that a USB audio device works correctly - STEPS: - 1. Connect a USB audio device to your system - 2. Click "Test", then speak into the microphone - 3. After a few seconds, your speech will be played back to you - VERIFICATION: - Did you hear your speech played back through the USB headphones? +_purpose: + This test will check that a USB audio device works correctly +_steps: + 1. Connect a USB audio device to your system + 2. Click "Test", then speak into the microphone + 3. After a few seconds, your speech will be played back to you +_verification: + Did you hear your speech played back through the USB headphones? +_summary: Verify USB audio device functionality through record and playback test. plugin: shell category_id: com.canonical.plainbox::audio @@ -433,9 +440,8 @@ command: else audio_test.py fi -_description: - Play back a sound on the default output and listen for it on the - default input. +_purpose: Play back a sound on the default output and listen for it on the default input. +_summary: Test playback and recording functionality on the default audio input and output. plugin: shell category_id: com.canonical.plainbox::audio @@ -452,8 +458,8 @@ command: else pactl_list.sh sinks fi -_description: - Test to detect if there's available sinks +_purpose: Test to detect if there are available sinks +_summary: Ensure audio sinks are available for detection. 
plugin: shell category_id: com.canonical.plainbox::audio @@ -470,18 +476,19 @@ command: else pactl_list.sh sources fi -_description: - Test to detect if there's available sources. +_purpose: Test to detect if there are available sources. +_summary: Test to ensure audio sources can be detected. plugin: shell category_id: com.canonical.plainbox::audio id: audio/alsa_info_collect estimated_duration: 2.0 command: alsa_info --no-dialog --no-upload --output "${PLAINBOX_SESSION_SHARE}"/alsa_info.log -_description: +_purpose: Collect audio-related system information. This data can be used to simulate this computer's audio subsystem and perform more detailed tests under a controlled environment. +_summary: Collect audio-related system information for simulation and detailed testing. plugin: attachment category_id: com.canonical.plainbox::audio @@ -489,8 +496,8 @@ id: audio/alsa_info_attachment depends: audio/alsa_info_collect estimated_duration: 1.0 command: [ -e "${PLAINBOX_SESSION_SHARE}"/alsa_info.log ] && cat "${PLAINBOX_SESSION_SHARE}"/alsa_info.log -_description: - Attaches the audio hardware data collection log to the results. +_purpose: Attaches the audio hardware data collection log to the results. +_summary: Attach audio hardware data collection log to the results. plugin: user-interact-verify category_id: com.canonical.plainbox::audio @@ -498,13 +505,13 @@ id: audio/channels flags: also-after-suspend estimated_duration: 20.0 command: speaker-test -c 2 -l 1 -t wav -_description: - PURPOSE: - Check that the various audio channels are working properly - STEPS: - 1. Commence the test - VERIFICATION: - You should clearly hear a voice from the different audio channels +_purpose: + Check that the various audio channels are working properly +_steps: + 1. Commence the test +_verification: + You should clearly hear a voice from the different audio channels +_summary: Verify that all audio channels are functioning correctly by hearing a voice clearly from them. 
plugin: shell category_id: com.canonical.plainbox::audio @@ -514,13 +521,14 @@ requires: package.name == 'pulseaudio-utils' device.category == 'AUDIO' command: volume_test.py --minvol 1 --maxvol 100 -_description: +_purpose: This test will verify that the volume levels are at an acceptable level on - your local system. The test will validate that the volume is greater than + your local system. The test will validate that the volume is greater than or equal to minvol and less than or equal to maxvol for all sources (inputs) - and sinks (outputs) recognized by PulseAudio. It will also validate that the - active source and sink are not muted. You should not manually adjust the + and sinks (outputs) recognized by PulseAudio. It will also validate that the + active source and sink are not muted. You should not manually adjust the volume or mute before running this test. +_summary: Verify acceptable volume levels on the system for all sources and sinks recognized by PulseAudio. plugin: manual category_id: com.canonical.plainbox::audio @@ -529,17 +537,17 @@ flags: also-after-suspend estimated_duration: 30.0 requires: dmi.product in ['Desktop','Low Profile Desktop','Tower','Mini Tower','Space-saving','All In One','All-In-One','AIO'] -_description: - PURPOSE: +_purpose: Check that external line out connection works correctly - STEPS: +_steps: 1. Insert cable to speakers (with built-in amplifiers) on the line out port 2. Open system sound preferences, 'Output' tab, select 'Line Out' on the connector list. Commence the test 3. On the system sound preferences, click 'Test Sound' to check left and right channel - VERIFICATION: - 1. Do you see internal speakers? The internal speakers should be replaced by external speaker from Line out port, if any +_verification: + 1. Do you see internal speakers? The internal speakers should be replaced by external speakers from the Line out port, if any 2. Do you hear the sound in the internal speakers? 
The internal speakers should be muted automatically - 3. Do you hear the sound coming out on the corresponding channel by external speakers from Line out port? + 3. Do you hear the sound coming out on the corresponding channel by external speakers from the Line out port? +_summary: Verify external line out connection functionality by inserting a cable to speakers, selecting 'Line Out' in system sound preferences, and testing sound channels. plugin: user-interact-verify category_id: com.canonical.plainbox::audio @@ -566,14 +574,15 @@ command: fi exit $EXIT_CODE _description: - PURPOSE: - Check that external line in connection works correctly - STEPS: - 1. Use a cable to connect the line in port to an external line out source. - 2. Open system sound preferences, 'Input' tab, select 'Line-in' on the connector list. Commence the test +_purpose: + Check that external line-in connection works correctly +_steps: + 1. Use a cable to connect the line-in port to an external line-out source. + 2. Open system sound preferences, 'Input' tab, select 'Line-in' on the connector list. Commence the test. 3. After a few seconds, your recording will be played back to you. - VERIFICATION: +_verification: Did you hear your recording? +_summary: Verify external line-in connection functionality by recording and playback testing. plugin: user-interact category_id: com.canonical.plainbox::audio @@ -584,23 +593,23 @@ requires: device.category == 'AUDIO' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py monitor -t 30 -m sinks else pulse_active_port_change.py sinks fi -_description: - PURPOSE: - Check that system detects speakers or headphones being plugged in - STEPS: - 1. Prepare a pair of headphones or speakers with a standard 3.5mm jack - 2. Locate the speaker / headphone jack on the device under test - 3. Run the test (you have 30 seconds from now on) - 4. 
Plug headphones or speakers into the appropriate jack - 5. Unplug the device for subsequent tests. - VERIFICATION: - Verification is automatic, no action is required. - The test times out after 30 seconds (and fails in that case). +_purpose: + Check that system detects speakers or headphones being plugged in +_steps: + 1. Prepare a pair of headphones or speakers with a standard 3.5mm jack + 2. Locate the speaker / headphone jack on the device under test + 3. Run the test (you have 30 seconds from now on) + 4. Plug headphones or speakers into the appropriate jack + 5. Unplug the device for subsequent tests. +_verification: + Verification is automatic, no action is required. + The test times out after 30 seconds (and fails in that case). +_summary: Ensure the system automatically detects when speakers or headphones are plugged in. plugin: user-interact category_id: com.canonical.plainbox::audio @@ -611,24 +620,24 @@ requires: device.category == 'AUDIO' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py monitor -t 30 -m sources else pulse_active_port_change.py sources fi -_description: - PURPOSE: +_purpose: Check that system detects a microphone being plugged in - STEPS: +_steps: 1. Prepare a microphone with a standard 3.5mm jack 2. Locate the microphone jack on the device under test. Keep in mind that it may be shared with the headphone jack. 3. Run the test (you have 30 seconds from now on) 4. Plug the microphone into the appropriate jack 5. Unplug the device for subsequent tests. - VERIFICATION: +_verification: Verification is automatic, no action is required. The test times out after 30 seconds (and fails in that case). +_summary: Ensure that the system can detect when a microphone is plugged in. 
plugin: user-interact-verify category_id: com.canonical.plainbox::audio @@ -653,16 +662,16 @@ command: audio_settings.py restore --file="$PLAINBOX_SESSION_SHARE"/pulseaudio_settings fi exit $EXIT_CODE -_description: - PURPOSE: +_purpose: Check that balance control works correctly on internal speakers - STEPS: +_steps: 1. Check that moving the balance slider from left to right works smoothly 2. Commence the test to play an audio tone for 10 seconds. 3. Move the balance slider from left to right and back. 4. Check that actual speaker audio balance follows your setting. - VERIFICATION: - Does the slider move smoothly, as well as being followed by the setting by the actual audio output? +_verification: + Does the slider move smoothly, as well as being followed by the actual audio output? +_summary: Test the balance control on internal speakers by playing an audio tone and adjusting the balance slider. plugin: user-interact-verify category_id: com.canonical.plainbox::audio @@ -689,15 +698,16 @@ command: fi exit $EXIT_CODE _description: - PURPOSE: - Check that balance control works correctly on external headphone - STEPS: - 1. Check that moving the balance slider from left to right works smoothly +_purpose: + Check that balance control works correctly on external headphones +_steps: + 1. Check that moving the balance slider from left to right works smoothly. 2. Commence the test to play an audio tone for 10 seconds. 3. Move the balance slider from left to right and back. - 4. Check that actual headphone audio balance follows your setting. - VERIFICATION: - Does the slider move smoothly, as well as being followed by the setting by the actual audio output? + 4. Check that actual headphone audio balance follows your settings. +_verification: + Does the slider move smoothly, as well as being followed by the settings by the actual audio output? +_summary: Test for checking balance control functioning on external headphones. 
plugin: shell category_id: com.canonical.plainbox::audio @@ -708,7 +718,8 @@ requires: device.category == 'AUDIO' package.name == 'alsa-base' command: cat /proc/asound/cards -_description: Test to detect audio devices after suspending 30 times. +_purpose: Test to detect audio devices after suspending 30 times. +_summary: Test the detection of audio devices post 30 suspend cycles. plugin: user-interact-verify category_id: com.canonical.plainbox::audio @@ -734,15 +745,16 @@ command: fi exit $EXIT_CODE _description: - PURPOSE: +_purpose: This test will check that internal speakers work correctly after suspending 30 times. - STEPS: +_steps: 1. Make sure that no external speakers or headphones are connected When testing a desktop, you can skip this test if there is no internal speaker, we will test the external output later 2. Commence the test to play a brief tone on your audio device - VERIFICATION: +_verification: Did you hear a tone? +_summary: Verify internal speakers' functionality after 30 suspension cycles. plugin: shell category_id: com.canonical.plainbox::audio @@ -764,9 +776,8 @@ command: audio_test.py else audio_test.py fi -_description: - Play back a sound on the default output and listen for it on the - default input, after suspending 30 times. +_purpose: Play back a sound on the default output and listen for it on the default input, after suspending 30 times. +_summary: Test audio playback and recording post-system suspension. plugin: shell category_id: com.canonical.plainbox::audio @@ -780,8 +791,8 @@ command: else pactl_list.sh sinks fi -_description: - Test to detect if there's available sources and sinks after suspending 30 times. +_purpose: Test to detect if there's available sources and sinks after suspending 30 times. +_summary: Test detection of audio sources and sinks after 30 suspend cycles. 
plugin: shell category_id: com.canonical.plainbox::audio @@ -792,13 +803,14 @@ requires: package.name == 'pulseaudio-utils' device.category == 'AUDIO' command: volume_test.py --minvol 1 --maxvol 100 -_description: +_purpose: This test will verify that the volume levels are at an acceptable level on - your local system. The test will validate that the volume is greater than + your local system. The test will validate that the volume is greater than or equal to minvol and less than or equal to maxvol for all sources (inputs) - and sinks (outputs) recognized by PulseAudio. It will also validate that the - active source and sink are not muted. You should not manually adjust the + and sinks (outputs) recognized by PulseAudio. It will also validate that the + active source and sink are not muted. You should not manually adjust the volume or mute before running this test. +_summary: Verify that volume levels are within acceptable limits after suspending for 30 cycles. plugin: shell category_id: com.canonical.plainbox::audio @@ -808,10 +820,11 @@ depends: power-management/suspend_30_cycles requires: device.category == 'AUDIO' package.name == 'alsa-base' -_description: Record mixer settings after suspending 30 times. +_purpose: Record mixer settings after suspending 30 times. command: audio_settings.py store --file="$PLAINBOX_SESSION_SHARE"/audio_settings_after_suspend_30_cycles diff "$PLAINBOX_SESSION_SHARE"/audio_settings_before_suspend "$PLAINBOX_SESSION_SHARE"/audio_settings_after_suspend_30_cycles +_summary: Record and compare audio mixer settings before and after suspending the device 30 times. id: audio/detect-playback-devices _summary: Check that at least one audio playback device exists @@ -843,14 +856,14 @@ command: if [ "$COUNT" -eq 0 ]; then exit 1 fi -esimated_duration: 0.5 +estimated_duration: 0.5 id: audio/alsa-playback _summary: Playback works _purpose: Check if sound is played through ALSA on the default output _steps: - 1. 
Make sure speakers or headphones are connect to the device + 1. Make sure speakers or headphones are connected to the device 2. Commence the test _verification: Did you hear the sound? @@ -881,13 +894,13 @@ requires: manifest.has_audio_loopback_connector == 'True' id: audio/pa-record-internal-mic _summary: Record a wav file and check it using pulseaudio - internal mic _purpose: - Check if audio input work fine through pulseaudio on internal mic + Check if audio input works fine through pulseaudio on the internal mic _steps: - 1. Make sure no external mic is connected to the device - 2. Make sure there's at least one output device connected to the device + 1. Make sure no external mic is connected to the device. + 2. Make sure there's at least one output device connected to the device. 3. Workaround to run pulseaudio correctly: sudo mkdir -p /run/user/0/snap.pulseaudio/pulse - 4. Find out corrent source, sink id: + 4. Find out correct source, sink id: sudo pulseaudio.pactl list 5. Set input/output profile: sudo pulseaudio.pactl set-card-profile 0 output:analog-stereo+input:analog-stereo @@ -898,9 +911,9 @@ _steps: sudo pulseaudio.pactl set-source-volume 80% 7. Record for 5 seconds to a wav file: sudo timeout 5 pulseaudio.parec -v /var/snap/pulseaudio/common/test.wav - 8. Play the recorded file + 8. Play the recorded file: sudo pulseaudio.paplay -v /var/snap/pulseaudio/common/test.wav - 9. Remove the recorded file + 9. Remove the recorded file: sudo rm /var/snap/pulseaudio/common/test.wav _verification: Did you hear the recorded sound correctly? @@ -913,13 +926,13 @@ estimated_duration: 1m id: audio/pa-record-external-mic _summary: Record a wav file and check it using pulseaudio - external mic _purpose: - Check if audio input work fine through pulseaudio on external mic + Check if audio input works fine through pulseaudio on an external mic _steps: 1. Make sure an external mic is connected to the device 2. 
Make sure there's at least one output device connected to the device 3. Workaround to run pulseaudio correctly: sudo mkdir -p /run/user/0/snap.pulseaudio/pulse - 4. Find out corrent source, sink id: + 4. Find out correct source and sink id: sudo pulseaudio.pactl list 5. Set input/output profile: sudo pulseaudio.pactl set-card-profile 0 output:analog-stereo+input:analog-stereo @@ -945,12 +958,12 @@ estimated_duration: 1m id: audio/pa-playback-headphone _summary: Play sample wav file using pulseaudio - headphone _purpose: - Check if sound is played through pulseaudio to headphone + Check if sound is played through pulseaudio to headphones _steps: - 1. Make sure a headphone is connected to the device + 1. Make sure headphones are connected to the device. 2. Workaround to run pulseaudio correctly: sudo mkdir -p /run/user/0/snap.pulseaudio/pulse - 3. Find out corrent source, sink id: + 3. Find out current source, sink id: sudo pulseaudio.pactl list 4. Set input/output profile: sudo pulseaudio.pactl set-card-profile 0 output:analog-stereo+input:analog-stereo @@ -958,9 +971,9 @@ _steps: sudo pulseaudio.pactl set-sink-mute 0 sudo pulseaudio.pactl set-sink-volume 80% 6. Put a test wav file in system as /var/snap/pulseaudio/common/test.wav - 7. Play the test wav file + 7. Play the test wav file: sudo pulseaudio.paplay -v /var/snap/pulseaudio/common/test.wav - 8. Remove the test file + 8. Remove the test file: sudo rm /var/snap/pulseaudio/common/test.wav _verification: Did you hear the sound correctly? @@ -975,20 +988,20 @@ _summary: Play sample wav file using pulseaudio - lineout _purpose: Check if sound is played through pulseaudio to lineout _steps: - 1. Make sure a output device is connected to the lineout port on device + 1. Make sure an output device is connected to the lineout port on the device. 2. Workaround to run pulseaudio correctly: sudo mkdir -p /run/user/0/snap.pulseaudio/pulse - 3. Find out corrent source, sink id: + 3. 
Find out current source, sink id: sudo pulseaudio.pactl list 4. Set input/output profile: sudo pulseaudio.pactl set-card-profile 0 output:analog-stereo+input:analog-stereo 5. Unmute and set volume of output: - sudo pulseaudio.pactl set-sink-mute 0 + sudo pulseaudio.pactl set-sink-mute false sudo pulseaudio.pactl set-sink-volume 80% - 6. Put a test wav file in system as /var/snap/pulseaudio/common/test.wav - 7. Play the test wav file + 6. Place a test wav file in the system as /var/snap/pulseaudio/common/test.wav + 7. Play the test wav file: sudo pulseaudio.paplay -v /var/snap/pulseaudio/common/test.wav - 8. Remove the test file + 8. Remove the test file: sudo rm /var/snap/pulseaudio/common/test.wav _verification: Did you hear the sound correctly? @@ -1003,16 +1016,16 @@ _summary: Play sample wav file using pulseaudio - hdmi _purpose: Check if sound is played through pulseaudio to HDMI output device _steps: - 1. Make sure a HDMI output device is connected to the device + 1. Make sure an HDMI output device is connected to the device. 2. Workaround to run pulseaudio correctly: sudo mkdir -p /run/user/0/snap.pulseaudio/pulse - 3. Find out corrent source, sink id: + 3. Find out current source, sink id: sudo pulseaudio.pactl list 4. Set input/output profile: sudo pulseaudio.pactl set-card-profile 0 output:hdmi-stereo+input:analog-stereo 5. Unmute and set volume of output: - sudo pulseaudio.pactl set-sink-mute 0 - sudo pulseaudio.pactl set-sink-volume 80% + sudo pulseaudio.pactl set-sink-mute 0 + sudo pulseaudio.pactl set-sink-volume 80% 6. Put a test wav file in system as /var/snap/pulseaudio/common/test.wav 7. 
Play the test wav file sudo pulseaudio.paplay -v /var/snap/pulseaudio/common/test.wav diff --git a/providers/base/units/audio/manifest.pxu b/providers/base/units/audio/manifest.pxu index 73ad218512..c01fc9b16e 100644 --- a/providers/base/units/audio/manifest.pxu +++ b/providers/base/units/audio/manifest.pxu @@ -13,9 +13,12 @@ unit: manifest entry id: has_audio_capture _name: Audio capture value-type: bool +_summary: Determine if audio capture is available. unit: manifest entry id: has_audio_loopback_connector _prompt: Does this device have the following?: _name: Audio Loopback Connector value-type: bool +_summary: Check if the device has an Audio Loopback Connector. + diff --git a/providers/base/units/audio/test-plan.pxu b/providers/base/units/audio/test-plan.pxu index 564a88d6f4..1c0931f89b 100644 --- a/providers/base/units/audio/test-plan.pxu +++ b/providers/base/units/audio/test-plan.pxu @@ -8,6 +8,7 @@ include: nested_part: com.canonical.certification::audio-cert-manual com.canonical.certification::audio-cert-automated +_summary: Execute audio tests and see Monitor / Graphic test plans for hybrid-graphic monitor audio tests. 
id: audio-cert-manual unit: test plan From a5f63a9ab09922957cf06d3271f84a42acb426f8 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Tue, 19 Mar 2024 12:45:17 +0100 Subject: [PATCH 097/108] Added 10m timeout to eddystone bluetooth test (bugfix) (#1086) * Added 10m timeout to eddystone bluetooth test * Add tests for eddystone_scanner Minor: black eddystone scanner --- .../scripts/eddystone_scanner.py | 62 +++++++++------- .../tests/test_eddystone_scanner.py | 70 +++++++++++++++++++ 2 files changed, 107 insertions(+), 25 deletions(-) create mode 100644 checkbox-support/checkbox_support/tests/test_eddystone_scanner.py diff --git a/checkbox-support/checkbox_support/scripts/eddystone_scanner.py b/checkbox-support/checkbox_support/scripts/eddystone_scanner.py index be9cf9c83d..079848cc00 100644 --- a/checkbox-support/checkbox_support/scripts/eddystone_scanner.py +++ b/checkbox-support/checkbox_support/scripts/eddystone_scanner.py @@ -19,38 +19,40 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-import argparse +import sys import time +import argparse from checkbox_support.vendor.beacontools import ( - BeaconScanner, EddystoneURLFrame) + BeaconScanner, + EddystoneURLFrame, +) +from checkbox_support.helpers.timeout import timeout from checkbox_support.interactive_cmd import InteractiveCommand def init_bluetooth(): # Power on the bluetooth controller - with InteractiveCommand('bluetoothctl') as btctl: - btctl.writeline('power on') + with InteractiveCommand("bluetoothctl") as btctl: + btctl.writeline("power on") time.sleep(3) - btctl.writeline('scan on') + btctl.writeline("scan on") time.sleep(3) - btctl.writeline('exit') + btctl.writeline("exit") btctl.kill() def beacon_scan(hci_device): TIMEOUT = 10 - beacon_mac = beacon_rssi = beacon_packet = '' + beacon_mac = beacon_rssi = beacon_packet = "" def callback(bt_addr, rssi, packet, additional_info): nonlocal beacon_mac, beacon_rssi, beacon_packet beacon_mac, beacon_rssi, beacon_packet = bt_addr, rssi, packet scanner = BeaconScanner( - callback, - bt_device_id=hci_device, - packet_filter=EddystoneURLFrame + callback, bt_device_id=hci_device, packet_filter=EddystoneURLFrame ) scanner.start() @@ -59,27 +61,34 @@ def callback(bt_addr, rssi, packet, additional_info): time.sleep(1) scanner.stop() if beacon_packet: - print('Eddystone beacon detected: URL: {} ' - ''.format(beacon_packet.url, beacon_mac, beacon_rssi)) + print( + "Eddystone beacon detected: URL: {} " + "".format(beacon_packet.url, beacon_mac, beacon_rssi) + ) return 0 - print('No EddyStone URL advertisement detected!') + print("No EddyStone URL advertisement detected!") return 1 -def main(): +@timeout(60 * 10) # 10 minutes timeout +def main(argv): init_bluetooth() parser = argparse.ArgumentParser( - description="Track BLE advertised packets") - parser.add_argument("-D", "--device", default='hci0', - help="Select the hciX device to use " - "(default hci0).") - args = parser.parse_args() + description="Track BLE advertised packets" + ) + 
parser.add_argument( + "-D", + "--device", + default="hci0", + help="Select the hciX device to use " "(default hci0).", + ) + args = parser.parse_args(argv) try: - hci_device = int(args.device.replace('hci', '')) + hci_device = int(args.device.replace("hci", "")) except ValueError: - print('Bad device argument, defaulting to hci0') + print("Bad device argument, defaulting to hci0") hci_device = 0 # Newer bluetooth controllers and bluez versions allow extended commands @@ -90,13 +99,16 @@ def main(): # Try the newest one first, then the older one if that doesn't work rc = beacon_scan(hci_device) if rc: - print('Trying again with older beacontools version...') + print("Trying again with older beacontools version...") global BeaconScanner, EddystoneURLFrame from checkbox_support.vendor.beacontools_2_0_2 import ( - BeaconScanner, EddystoneURLFrame) + BeaconScanner, + EddystoneURLFrame, + ) + rc = beacon_scan(hci_device) return rc -if __name__ == '__main__': - raise SystemExit(main()) +if __name__ == "__main__": + raise SystemExit(main(sys.argv[1:])) diff --git a/checkbox-support/checkbox_support/tests/test_eddystone_scanner.py b/checkbox-support/checkbox_support/tests/test_eddystone_scanner.py new file mode 100644 index 0000000000..79e9f22588 --- /dev/null +++ b/checkbox-support/checkbox_support/tests/test_eddystone_scanner.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +# encoding: UTF-8 +# Copyright (c) 2024 Canonical Ltd. +# +# Authors: +# Massimiliano Girardi +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +import unittest +from unittest.mock import patch, MagicMock + +from checkbox_support.scripts import eddystone_scanner + + +class TestEddystoneScanner(unittest.TestCase): + @patch("checkbox_support.scripts.eddystone_scanner.BeaconScanner") + def test_beacon_scan_ok(self, mock_beacon_scanner): + class BeaconScanner: + def __init__(self, callback, *args, **kwargs): + self.callback = callback + + def start(self): + packet = MagicMock(url="packet_url") + self.callback("address", "rssi", packet, None) + + def stop(self): + pass + + mock_beacon_scanner.side_effect = BeaconScanner + self.assertEqual(eddystone_scanner.beacon_scan("1"), 0) + + @patch("checkbox_support.scripts.eddystone_scanner.BeaconScanner") + @patch("time.time") + @patch("time.sleep") + def test_beacon_scan_fail( + self, mock_sleep, mock_time, mock_beacon_scanner + ): + mock_time.side_effect = [0, 60 * 60 * 60] # 60h, trigger timeout + self.assertEqual(eddystone_scanner.beacon_scan("1"), 1) + + @patch("checkbox_support.scripts.eddystone_scanner.BeaconScanner") + @patch("checkbox_support.scripts.eddystone_scanner.InteractiveCommand") + @patch("time.sleep") + def test_main_ok( + self, mock_sleep, mock_interactive_command, mock_beacon_scanner + ): + class BeaconScanner: + def __init__(self, callback, *args, **kwargs): + self.callback = callback + + def start(self): + packet = MagicMock(url="packet_url") + self.callback("address", "rssi", packet, None) + + def stop(self): + pass + + mock_beacon_scanner.side_effect = BeaconScanner + self.assertEqual(eddystone_scanner.main(["--device", "hc1"]), 0) From b5317f4c60b8f9d47254711ebdc1c89539146eeb Mon Sep 17 00:00:00 2001 From: Pierre Equoy Date: Tue, 19 Mar 2024 19:45:42 +0800 Subject: [PATCH 098/108] Take inline overrides into account in mandatory_include and bootstrap_include sections of a test plan (BugFix) (#1079) * Take inline overrides into 
account in mandatory_include section of a test plan Although inline overrides were taking into account in the `include` section of a test plan, it was not the case for the `mandatory_include` section. So a test plan like this: unit: test plan id: test-plan name: My test plan mandatory_include: mandatory-cert-blocker-job certification_status=blocker include: regular-cert-blocker-job certification_status=blocker would only make "regular-cert-blocker-job" a cert-blocker, not "mandatory-cert-blocker-job". * Add support for bootstrap_include section - Move the V class one level above, so it can be used for the different include sections of the test plan. - Fix issue found by Max (`mandatory_include` was parsed only if there was an `include` section) - Add support for `bootstrap_include` for good measure * Add unit test to check bootstrap_include inline overrides --- .../plainbox/impl/unit/test_testplan.py | 37 +++++++++++++++++++ checkbox-ng/plainbox/impl/unit/testplan.py | 36 ++++++++++-------- 2 files changed, 57 insertions(+), 16 deletions(-) diff --git a/checkbox-ng/plainbox/impl/unit/test_testplan.py b/checkbox-ng/plainbox/impl/unit/test_testplan.py index 73d76b4d3d..b817bc736e 100644 --- a/checkbox-ng/plainbox/impl/unit/test_testplan.py +++ b/checkbox-ng/plainbox/impl/unit/test_testplan.py @@ -508,3 +508,40 @@ def test_nested_tesplan__multiple_namespaces(self): self.assertIsInstance(qual_list[1].matcher, OperatorMatcher) self.assertEqual(qual_list[1].matcher.value, 'ns2::Bar') self.assertEqual(qual_list[1].inclusive, True) + + +class TestTestPlanUnitSupport(TestCase): + + def setUp(self): + self.tp1 = TestPlanUnit({ + "id": "tp1", + "unit": "test plan", + "name": "An example test plan 1", + "bootstrap_include": "bootstrap_job certification_status=blocker", + "mandatory_include": "mandatory_job certification_status=blocker", + "include": "job1 certification_status=non-blocker", + }) + self.tp2 = TestPlanUnit({ + "id": "tp1", + "unit": "test plan", + "name": "An 
example test plan 2", + "include": "job1 certification_status=blocker", + }) + + def test_inline_override(self): + support_tp1 = TestPlanUnitSupport(self.tp1) + support_tp2 = TestPlanUnitSupport(self.tp2) + self.assertEqual( + support_tp1.override_list, + [ + ("^bootstrap_job$", [("certification_status", "blocker")]), + ("^job1$", [("certification_status", "non-blocker")]), + ("^mandatory_job$", [("certification_status", "blocker")]), + ], + ) + self.assertEqual( + support_tp2.override_list, + [ + ("^job1$", [("certification_status", "blocker")]), + ], + ) diff --git a/checkbox-ng/plainbox/impl/unit/testplan.py b/checkbox-ng/plainbox/impl/unit/testplan.py index 96b5c9ae3a..efbb973154 100644 --- a/checkbox-ng/plainbox/impl/unit/testplan.py +++ b/checkbox-ng/plainbox/impl/unit/testplan.py @@ -826,23 +826,27 @@ def _get_inline_overrides( collected into a list of tuples ``(field, value)`` and this list is subsequently packed into a tuple ``(pattern, field_value_list)``. """ - override_list = [] - if testplan.include is not None: - - class V(Visitor): - - def visit_IncludeStmt_node(self, node: IncludeStmt): - if not node.overrides: - return - pattern = r"^{}$".format( - testplan.qualify_id(node.pattern.text)) - field_value_list = [ - (override_exp.field.text.replace('-', '_'), - override_exp.value.text) - for override_exp in node.overrides] - override_list.append((pattern, field_value_list)) + class V(Visitor): - V().visit(IncludeStmtList.parse(testplan.include)) + def visit_IncludeStmt_node(self, node: IncludeStmt): + if not node.overrides: + return + pattern = r"^{}$".format( + testplan.qualify_id(node.pattern.text)) + field_value_list = [ + (override_exp.field.text.replace('-', '_'), + override_exp.value.text) + for override_exp in node.overrides] + override_list.append((pattern, field_value_list)) + override_list = [] + include_sections = ( + testplan.bootstrap_include, + testplan.mandatory_include, + testplan.include, + ) + for section in include_sections: + if 
section: + V().visit(IncludeStmtList.parse(section)) for tp_unit in testplan.get_nested_part(): override_list.extend(self._get_inline_overrides(tp_unit)) return override_list From 73594f63ec01dcf72b054ab9a8f5d06168ef21dd Mon Sep 17 00:00:00 2001 From: kissiel Date: Tue, 19 Mar 2024 13:50:57 +0100 Subject: [PATCH 099/108] Fix erroneous additions to PXUs (BugFix) (#1087) * rollback unwanted changes * use correct spelling for check audio daemon --- ..._audio_deamon.sh => check_audio_daemon.sh} | 0 providers/base/units/audio/jobs.pxu | 40 +++++++++---------- providers/base/units/audio/manifest.pxu | 3 -- providers/base/units/audio/test-plan.pxu | 1 - providers/base/units/bluetooth/jobs.pxu | 4 +- providers/base/units/dock/jobs.pxu | 40 +++++++++---------- 6 files changed, 42 insertions(+), 46 deletions(-) rename providers/base/bin/{check_audio_deamon.sh => check_audio_daemon.sh} (100%) diff --git a/providers/base/bin/check_audio_deamon.sh b/providers/base/bin/check_audio_daemon.sh similarity index 100% rename from providers/base/bin/check_audio_deamon.sh rename to providers/base/bin/check_audio_daemon.sh diff --git a/providers/base/units/audio/jobs.pxu b/providers/base/units/audio/jobs.pxu index c0061b2163..1f1c9c7f10 100644 --- a/providers/base/units/audio/jobs.pxu +++ b/providers/base/units/audio/jobs.pxu @@ -43,7 +43,7 @@ requires: package.name == 'gir1.2-gst-plugins-base-0.10' or package.name == 'gir1.2-gst-plugins-base-1.0' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? 
@@ -83,7 +83,7 @@ requires: package.name in ['pulseaudio-utils', 'pipewire'] flags: also-after-suspend command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 --device hdmi 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? @@ -119,7 +119,7 @@ requires: package.name in ['pulseaudio-utils', 'pipewire'] flags: also-after-suspend command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 --device hdmi 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? @@ -155,7 +155,7 @@ requires: package.name in ['pulseaudio-utils', 'pipewire'] flags: also-after-suspend command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 --device hdmi 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? @@ -191,7 +191,7 @@ requires: package.name in ['pulseaudio-utils', 'pipewire'] flags: also-after-suspend command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 --device hdmi 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? @@ -227,7 +227,7 @@ requires: package.name in ['pulseaudio-utils', 'pipewire'] flags: also-after-suspend command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 --device hdmi 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? 
@@ -263,7 +263,7 @@ requires: package.name in ['pulseaudio-utils', 'pipewire'] flags: also-after-suspend command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 --device hdmi 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? @@ -297,7 +297,7 @@ requires: package.name == 'gir1.2-gst-plugins-base-0.10' or package.name == 'gir1.2-gst-plugins-base-1.0' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? @@ -331,7 +331,7 @@ requires: package.name == 'gstreamer1.0-plugins-good' or package.name == 'gstreamer0.10-plugins-good' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio alsa_record_playback.sh EXIT_CODE=$? @@ -366,7 +366,7 @@ requires: package.name == 'gstreamer1.0-plugins-good' or package.name == 'gstreamer0.10-plugins-good' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio alsa_record_playback.sh EXIT_CODE=$? @@ -398,7 +398,7 @@ requires: package.name == 'gstreamer1.0-plugins-good' or package.name == 'gstreamer0.10-plugins-good' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then alsa_record_playback.sh EXIT_CODE=$? 
else @@ -435,7 +435,7 @@ requires: device.category == 'AUDIO' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_test.py else audio_test.py @@ -453,7 +453,7 @@ requires: package.name in ['pulseaudio-utils', 'pipewire'] manifest.has_audio_playback == 'True' command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py detect -t audio -c sinks else pactl_list.sh sinks @@ -471,7 +471,7 @@ requires: package.name in ['pulseaudio-utils', 'pipewire'] manifest.has_audio_capture == 'True' command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py detect -t audio -c sources else pactl_list.sh sources @@ -561,7 +561,7 @@ requires: package.name == 'gstreamer1.0-plugins-good' or package.name == 'gstreamer0.10-plugins-good' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio alsa_record_playback.sh EXIT_CODE=$? @@ -650,7 +650,7 @@ requires: package.name == 'gir1.2-gst-plugins-base-0.10' or package.name == 'gir1.2-gst-plugins-base-1.0' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 10 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? @@ -685,7 +685,7 @@ requires: package.name == 'gir1.2-gst-plugins-base-0.10' or package.name == 'gir1.2-gst-plugins-base-1.0' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 10 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? 
@@ -732,7 +732,7 @@ requires: package.name == 'gir1.2-gst-plugins-base-0.10' or package.name == 'gir1.2-gst-plugins-base-1.0' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? @@ -771,7 +771,7 @@ requires: package.name in ['pulseaudio-utils', 'pipewire'] device.category == 'AUDIO' command: audio_test.py - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_test.py else audio_test.py @@ -786,7 +786,7 @@ estimated_duration: 1.0 requires: package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py detect -t audio -c sinks else pactl_list.sh sinks diff --git a/providers/base/units/audio/manifest.pxu b/providers/base/units/audio/manifest.pxu index c01fc9b16e..73ad218512 100644 --- a/providers/base/units/audio/manifest.pxu +++ b/providers/base/units/audio/manifest.pxu @@ -13,12 +13,9 @@ unit: manifest entry id: has_audio_capture _name: Audio capture value-type: bool -_summary: Determine if audio capture is available. unit: manifest entry id: has_audio_loopback_connector _prompt: Does this device have the following?: _name: Audio Loopback Connector value-type: bool -_summary: Check if the device has an Audio Loopback Connector. - diff --git a/providers/base/units/audio/test-plan.pxu b/providers/base/units/audio/test-plan.pxu index 1c0931f89b..564a88d6f4 100644 --- a/providers/base/units/audio/test-plan.pxu +++ b/providers/base/units/audio/test-plan.pxu @@ -8,7 +8,6 @@ include: nested_part: com.canonical.certification::audio-cert-manual com.canonical.certification::audio-cert-automated -_summary: Execute audio tests and see Monitor / Graphic test plans for hybrid-graphic monitor audio tests. 
id: audio-cert-manual unit: test plan diff --git a/providers/base/units/bluetooth/jobs.pxu b/providers/base/units/bluetooth/jobs.pxu index 9ceea173e4..5dc06b0265 100644 --- a/providers/base/units/bluetooth/jobs.pxu +++ b/providers/base/units/bluetooth/jobs.pxu @@ -104,7 +104,7 @@ requires: manifest.has_bt_smart == 'True' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? @@ -457,7 +457,7 @@ requires: manifest.has_bt_smart == 'True' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio alsa_record_playback.sh EXIT_CODE=$? diff --git a/providers/base/units/dock/jobs.pxu b/providers/base/units/dock/jobs.pxu index 424342924e..279e463c77 100644 --- a/providers/base/units/dock/jobs.pxu +++ b/providers/base/units/dock/jobs.pxu @@ -259,7 +259,7 @@ plugin: user-interact-verify flags: also-after-suspend estimated_duration: 30.0 command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 --device hdmi 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? @@ -293,7 +293,7 @@ plugin: user-interact-verify flags: also-after-suspend estimated_duration: 30.0 command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 --device hdmi 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? 
@@ -328,7 +328,7 @@ _summary: HDMI audio test plugin: user-interact-verify estimated_duration: 30.0 command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 --device hdmi 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? @@ -364,7 +364,7 @@ plugin: user-interact-verify flags: also-after-suspend estimated_duration: 30.0 command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 --device hdmi 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? @@ -400,7 +400,7 @@ plugin: user-interact-verify flags: also-after-suspend estimated_duration: 30.0 command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 --device hdmi 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? @@ -435,7 +435,7 @@ requires: package.name == 'gir1.2-gst-plugins-base-0.10' or package.name == 'gir1.2-gst-plugins-base-1.0' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? @@ -470,7 +470,7 @@ requires: package.name == 'gstreamer1.0-plugins-good' or package.name == 'gstreamer0.10-plugins-good' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio alsa_record_playback.sh EXIT_CODE=$? 
@@ -544,7 +544,7 @@ requires: package.name == 'gstreamer1.0-plugins-good' or package.name == 'gstreamer0.10-plugins-good' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio alsa_record_playback.sh EXIT_CODE=$? @@ -576,7 +576,7 @@ requires: device.category == 'AUDIO' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py monitor -t 30 -m sinks else pulse_active_port_change.py sinks @@ -604,7 +604,7 @@ requires: device.category == 'AUDIO' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py monitor -t 30 -m sources else pulse_active_port_change.py sources @@ -1193,7 +1193,7 @@ plugin: user-interact-verify flags: also-after-suspend estimated_duration: 30.0 command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 --device hdmi 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? @@ -1227,7 +1227,7 @@ plugin: user-interact-verify flags: also-after-suspend estimated_duration: 30.0 command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 --device hdmi 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? @@ -1261,7 +1261,7 @@ plugin: user-interact-verify flags: also-after-suspend estimated_duration: 30.0 command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 --device hdmi 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? 
@@ -1295,7 +1295,7 @@ plugin: user-interact-verify flags: also-after-suspend estimated_duration: 30.0 command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 --device hdmi 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? @@ -1829,7 +1829,7 @@ requires: package.name in ['pulseaudio-utils', 'pipewire'] device.category == 'AUDIO' command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_test.py else audio_test.py @@ -2545,7 +2545,7 @@ _summary: Multiple monitor audio test plugin: user-interact-verify estimated_duration: 60.0 command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py through -m sink -c "speaker-test -c 2 -l 1 -t wav" else indexes=$(pacmd list-sinks | grep -e 'index' -e 'available' | grep -B 1 -e 'available: unknown' -e 'available: yes' | grep index | awk '{print $NF}') @@ -2858,7 +2858,7 @@ requires: device.category == 'AUDIO' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py monitor -t 30 -m sinks else pulse_active_port_change.py sinks @@ -2894,7 +2894,7 @@ requires: package.name == 'gir1.2-gst-plugins-base-0.10' or package.name == 'gir1.2-gst-plugins-base-1.0' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio pipewire_utils.py gst -t 2 'audiotestsrc wave=sine freq=512 ! audioconvert ! audioresample ! autoaudiosink' EXIT_CODE=$? 
@@ -2934,7 +2934,7 @@ requires: device.category == 'AUDIO' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py monitor -t 30 -m sources else pulse_active_port_change.py sources @@ -2971,7 +2971,7 @@ requires: package.name == 'gstreamer1.0-plugins-good' or package.name == 'gstreamer0.10-plugins-good' package.name in ['pulseaudio-utils', 'pipewire'] command: - if check_audio_deamon.sh ; then + if check_audio_daemon.sh ; then pipewire_utils.py show -t audio alsa_record_playback.sh EXIT_CODE=$? From c4659a779ad04eeddcdd32b8a65f0ff288020cb3 Mon Sep 17 00:00:00 2001 From: stanley31huang Date: Tue, 19 Mar 2024 21:47:24 +0800 Subject: [PATCH 100/108] [checkbox-ce-oem] add iio sensors test (New) (#1036) * Add: implement iio sensors test implement iio sensor tests --- .../bin/iio_sensor_test.py | 238 ++++++++++++++++++ .../tests/test_iio_sensor_test.py | 225 +++++++++++++++++ .../units/iio-sensors/category.pxu | 3 + .../units/iio-sensors/jobs.pxu | 31 +++ .../units/iio-sensors/manifest.pxu | 4 + .../units/iio-sensors/test-plan.pxu | 42 ++++ .../units/test-plan-ce-oem.pxu | 4 + 7 files changed, 547 insertions(+) create mode 100755 contrib/checkbox-provider-ce-oem/bin/iio_sensor_test.py create mode 100755 contrib/checkbox-provider-ce-oem/tests/test_iio_sensor_test.py create mode 100644 contrib/checkbox-provider-ce-oem/units/iio-sensors/category.pxu create mode 100644 contrib/checkbox-provider-ce-oem/units/iio-sensors/jobs.pxu create mode 100644 contrib/checkbox-provider-ce-oem/units/iio-sensors/manifest.pxu create mode 100644 contrib/checkbox-provider-ce-oem/units/iio-sensors/test-plan.pxu diff --git a/contrib/checkbox-provider-ce-oem/bin/iio_sensor_test.py b/contrib/checkbox-provider-ce-oem/bin/iio_sensor_test.py new file mode 100755 index 0000000000..7636ac28ea --- /dev/null +++ b/contrib/checkbox-provider-ce-oem/bin/iio_sensor_test.py @@ -0,0 +1,238 @@ +#!/usr/bin/env python3 +# This 
file is part of Checkbox. +# +# Copyright 2024 Canonical Ltd. +# Written by: +# Stanley Huang +# +# Checkbox is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, +# as published by the Free Software Foundation. +# +# Checkbox is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Checkbox. If not, see . +import re +import argparse +from pathlib import Path + + +IIO_PATH = "/sys/bus/iio/devices/iio:device" + +pressure_nodes = [ + "in_pressure_input", + "in_pressure_oversampling_ratio", + "in_pressure_sampling_frequency" +] +accelerometer_nodes = [ + "in_accel_sampling_frequency", + "in_accel_scale", + "in_accel_x_calibbias", + "in_accel_x_raw", + "in_accel_y_calibbias", + "in_accel_y_raw", + "in_accel_z_calibbias", + "in_accel_z_raw" +] +humidity_nodes = [ + "in_humidityrelative_integration_time", + "in_humidityrelative_scale", + "in_humidityrelative_raw", +] + + +def _check_node(path): + """ + Initial a Path object for the industrial I/O sensor + + Args: + path (str): the full path of the industrial I/O sensor + + Raises: + FileNotFoundError: the sysfs of sensor not exists + + Returns: + iio_node: the node of the industrial I/O sensor. 
(Path object) + """ + iio_node = Path(path) + if not iio_node.exists(): + raise FileNotFoundError("{} file not exists".format(iio_node)) + + return iio_node + + +def _check_reading(values): + """ + Check the format of sensor reading + + Args: + values (list): a list of sensor reading + + Returns: + bool: True if all reading match expected format + """ + result = True + reading_pattern = r"^[+-]?\d+(\.[0-9]+)?$" + for value in values: + if re.search(reading_pattern, value) is None: + result = False + + return result + + +def check_pressure_sensor(index): + """ + Validate the sysfs of industrial I/O pressure sensor + + Args: + index (str): the index of sensor + + Raises: + ValueError: the reading of sensor is not expected format + """ + iio_node = _check_node(IIO_PATH + index) + readings = [] + + for sub_node in pressure_nodes: + tmp_node = iio_node.joinpath(sub_node) + _check_node(tmp_node) + value = tmp_node.read_text().strip("\n") + print("The value of {} node is {}".format(tmp_node, value)) + readings.append(value) + + if readings and _check_reading(readings): + print("The pressure sensor test passed") + else: + raise ValueError("ERROR: The pressure value is not valid") + + +def check_accelerometer_sensor(index): + """ + Validate the sysfs of industrial I/O accelerometer sensor + + Args: + index (str): the index of sensor + + Raises: + ValueError: the reading of sensor is not expected format + """ + readings = [] + iio_node = _check_node(IIO_PATH + index) + + for sub_node in accelerometer_nodes: + tmp_node = iio_node.joinpath(sub_node) + _check_node(tmp_node) + + value = tmp_node.read_text().strip("\n") + print("the value of {} node is {}".format(tmp_node, value)) + readings.append(value) + + if readings and _check_reading(readings): + print("The accelerometer sensor test passed") + else: + raise ValueError("ERROR: The accelerometer value is not valid") + + +def check_humidity_sensor(index): + """ + Validate the sysfs of industrial I/O humidity sensor + + Args: + 
index (str): the index of sensor + + Raises: + ValueError: the reading of sensor is not expected format + """ + readings = [] + iio_node = _check_node(IIO_PATH + index) + + for sub_node in humidity_nodes: + tmp_node = iio_node.joinpath(sub_node) + _check_node(tmp_node) + value = tmp_node.read_text().strip("\n") + print("the value of {} node is {}".format(tmp_node, value)) + readings.append(value) + + if readings and _check_reading(readings): + print("The humidity sensor test passed") + else: + raise ValueError("ERROR: The humidity value is not valid") + + +def validate_iio_sensor(args): + """ + Check sensor and validate the format of reading + + Args: + args (Namespace): the arguments includes type and index of sensor + """ + test_funcs = { + "pressure": check_pressure_sensor, + "accelerometer": check_accelerometer_sensor, + "humidityrelative": check_humidity_sensor + } + + print("# Perform {} sensor test - index {}".format(args.type, args.index)) + test_funcs[args.type](args.index) + print("# The {} sensor test passed".format(args.type)) + + +def dump_sensor_resource(args): + """ + Print out the sensor index and sensor type + + Args: + args (Namespace): the arguments includes type and index of sensor + """ + output = "" + resource_text = "index: {}\ntype: {}\n\n" + for sensor in args.mapping.split(): + index, sensor_type = sensor.split(":") + output += resource_text.format(index, sensor_type) + print(output) + + +def register_arguments(): + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description='Industrial IO sensor tests') + + sub_parsers = parser.add_subparsers(dest="test_func") + sub_parsers.required = True + + iio_test_parser = sub_parsers.add_parser("test") + iio_test_parser.add_argument( + "-t", "--type", + required=True, + choices=["pressure", "accelerometer", "humidityrelative"], + type=str + ) + iio_test_parser.add_argument( + "-i", "--index", + required=True, + type=str, + ) + 
iio_test_parser.set_defaults(test_func=validate_iio_sensor) + + iio_arg_parser = sub_parsers.add_parser("sensor-resource") + iio_arg_parser.add_argument( + "mapping", + help=("Usage of parameter: IIO_SENSORS=" + "{index}:{sensor_type} {index}:{sensor_type}") + ) + + iio_arg_parser.set_defaults(test_func=dump_sensor_resource) + + args = parser.parse_args() + return args + + +if __name__ == "__main__": + + args = register_arguments() + + args.test_func(args) diff --git a/contrib/checkbox-provider-ce-oem/tests/test_iio_sensor_test.py b/contrib/checkbox-provider-ce-oem/tests/test_iio_sensor_test.py new file mode 100755 index 0000000000..c2db93214e --- /dev/null +++ b/contrib/checkbox-provider-ce-oem/tests/test_iio_sensor_test.py @@ -0,0 +1,225 @@ +import unittest +import sys +import argparse +from pathlib import Path +from io import StringIO +from contextlib import redirect_stdout +from unittest.mock import patch, Mock + +import iio_sensor_test + + +class TestIndustrialIOSensorTest(unittest.TestCase): + + @patch("pathlib.Path.exists") + def test_check_root_node_exists(self, mock_path): + mock_path.return_value = True + node = iio_sensor_test._check_node("iio_sensor1") + + self.assertIsInstance(node, Path) + + @patch("pathlib.Path.exists") + def test_check_root_node_not_exists(self, mock_path): + mock_path.return_value = False + + with self.assertRaises(FileNotFoundError): + iio_sensor_test._check_node("iio_sensor1") + + def test_check_reading_is_expected(self): + readings = ["20.1", "-255", "+123.3"] + + self.assertTrue(iio_sensor_test._check_reading(readings)) + + def test_check_reading_not_expected(self): + readings = ["20.1", "-255", "+a"] + + self.assertFalse(iio_sensor_test._check_reading(readings)) + + @patch("iio_sensor_test._check_reading") + @patch("pathlib.Path.read_text") + @patch("iio_sensor_test._check_node") + def test_check_pressure_sensor( + self, mock_check_node, mock_read, mock_check_reading): + mock_check_node.return_value = Path("fake") + + 
mock_check_node.return_value = Path("fake") + mock_read.side_effect = ["20.1", "-255", "+123.3"] + + with redirect_stdout(StringIO()): + iio_sensor_test.check_pressure_sensor("iio_sensor1") + + self.assertEqual(mock_check_node.call_count, 4) + self.assertEqual(mock_read.call_count, 3) + self.assertEqual(mock_check_reading.call_count, 1) + + @patch("pathlib.Path.read_text") + @patch("iio_sensor_test._check_node") + def test_check_pressure_sensor_unexpected_value( + self, mock_check_node, mock_read): + + mock_check_node.return_value = Path("fake") + mock_read.side_effect = ["20.1", "-255", "+a"] + with redirect_stdout(StringIO()): + with self.assertRaises(ValueError): + iio_sensor_test.check_pressure_sensor("iio_sensor1") + + @patch("iio_sensor_test._check_reading") + @patch("pathlib.Path.read_text") + @patch("iio_sensor_test._check_node") + def test_check_accelerometer_sensor( + self, mock_check_node, mock_read, mock_check_reading): + + mock_check_node.return_value = Path("fake") + mock_read.side_effect = [ + "20.1", "-255", "+123.3", "1", + "509", "-0.1235", "+0.2222", "6666" + ] + + with redirect_stdout(StringIO()): + iio_sensor_test.check_accelerometer_sensor("iio_sensor1") + + self.assertEqual(mock_check_node.call_count, 9) + self.assertEqual(mock_read.call_count, 8) + self.assertEqual(mock_check_reading.call_count, 1) + + @patch("pathlib.Path.read_text") + @patch("iio_sensor_test._check_node") + def test_check_accelerometer_sensor_unexpected_value( + self, mock_check_node, mock_read): + + mock_check_node.return_value = Path("fake") + mock_read.side_effect = [ + "d20.1", "-255", "+123.3", "1", + "5d09", "-0a.1235", "+0.2222", "6666" + ] + with redirect_stdout(StringIO()): + with self.assertRaises(ValueError): + iio_sensor_test.check_accelerometer_sensor( + "iio_sensor1") + + @patch("iio_sensor_test._check_reading") + @patch("pathlib.Path.read_text") + @patch("iio_sensor_test._check_node") + def test_check_humidity_sensor( + self, mock_check_node, mock_read, 
mock_check_reading): + + mock_check_node.return_value = Path("fake") + mock_read.side_effect = ["20.1", "-255", "+123.3"] + + with redirect_stdout(StringIO()): + iio_sensor_test.check_humidity_sensor("iio_sensor1") + + self.assertEqual(mock_check_node.call_count, 4) + self.assertEqual(mock_read.call_count, 3) + self.assertEqual(mock_check_reading.call_count, 1) + + @patch("pathlib.Path.read_text") + @patch("iio_sensor_test._check_node") + def test_check_humidity_sensor_unexpected_value( + self, mock_check_node, mock_read): + + mock_check_node.return_value = Path("fake") + mock_read.side_effect = ["20.d1", "-255", "+a"] + with redirect_stdout(StringIO()): + with self.assertRaises(ValueError): + iio_sensor_test.check_humidity_sensor("iio_sensor1") + + @patch("iio_sensor_test.check_pressure_sensor") + def test_validate_iio_sensor_test(self, mock_func): + mock_args = Mock( + return_value=argparse.Namespace( + type="pressure", + index="0") + ) + mock_func.return_value = True + + with redirect_stdout(StringIO()): + iio_sensor_test.validate_iio_sensor(mock_args()) + mock_func.assert_called_once_with( + mock_args().index) + + def test_sensor_resource(self): + mock_args = Mock( + return_value=argparse.Namespace( + mapping="0:pressure 1:accelerometer 2:humidityrelative") + ) + with redirect_stdout(StringIO()) as stdout: + iio_sensor_test.dump_sensor_resource(mock_args()) + + self.assertEqual( + stdout.getvalue(), + ( + "index: 0\n" + "type: pressure\n\n" + "index: 1\n" + "type: accelerometer\n\n" + "index: 2\n" + "type: humidityrelative\n\n\n" + ) + ) + + def test_sensor_resource_with_unexpected_format(self): + mock_args = Mock( + return_value=argparse.Namespace( + mapping="0:pressure:error") + ) + + with self.assertRaises(ValueError) as context: + iio_sensor_test.dump_sensor_resource(mock_args()) + + self.assertEqual( + str(context.exception), + "too many values to unpack (expected 2)" + ) + + +class TestArgumentParser(unittest.TestCase): + + def test_pressure_parser(self): 
+ sys.argv = [ + "iio_sensor_test.py", "test", "-t", "pressure", + "-i", "3" + ] + args = iio_sensor_test.register_arguments() + + self.assertEqual(args.test_func, + iio_sensor_test.validate_iio_sensor) + self.assertEqual(args.type, "pressure") + self.assertEqual(args.index, "3") + + def test_accelerometer_parser(self): + sys.argv = [ + "iio_sensor_test.py", "test", "-t", "accelerometer", + "-i", "3" + ] + args = iio_sensor_test.register_arguments() + + self.assertEqual(args.test_func, + iio_sensor_test.validate_iio_sensor) + self.assertEqual(args.type, "accelerometer") + self.assertEqual(args.index, "3") + + def test_humidityrelative_parser(self): + sys.argv = [ + "iio_sensor_test.py", "test", + "--type", "humidityrelative", + "--index", "3" + ] + args = iio_sensor_test.register_arguments() + + self.assertEqual(args.test_func, + iio_sensor_test.validate_iio_sensor) + self.assertEqual(args.type, "humidityrelative") + self.assertEqual(args.index, "3") + + def test_iio_sensore_resource_parser(self): + sys.argv = [ + "iio_sensor_test.py", + "sensor-resource", + "0:pressure 1:accelerometer 2:humidityrelative" + ] + args = iio_sensor_test.register_arguments() + + self.assertEqual(args.test_func, + iio_sensor_test.dump_sensor_resource) + self.assertEqual(args.mapping, sys.argv[2]) diff --git a/contrib/checkbox-provider-ce-oem/units/iio-sensors/category.pxu b/contrib/checkbox-provider-ce-oem/units/iio-sensors/category.pxu new file mode 100644 index 0000000000..c016a18146 --- /dev/null +++ b/contrib/checkbox-provider-ce-oem/units/iio-sensors/category.pxu @@ -0,0 +1,3 @@ +unit: category +id: iio-sensors +_name: Industrial IO sensors Test diff --git a/contrib/checkbox-provider-ce-oem/units/iio-sensors/jobs.pxu b/contrib/checkbox-provider-ce-oem/units/iio-sensors/jobs.pxu new file mode 100644 index 0000000000..8354887a7b --- /dev/null +++ b/contrib/checkbox-provider-ce-oem/units/iio-sensors/jobs.pxu @@ -0,0 +1,31 @@ +id: ce-oem-iio-sensors/resource +_summary: Generates a IIO 
sensors mapping for IIO sensor test +_description: + A IIO sensors mapping. By giving an IIO sensors on machnie to generates test jobs. + Usage of parameter: + IIO_SENSORS=device:sensor_type device:sensor_type ... + e.g. IIO_SENSORS=0:pressure 1:accelerometer 2:humidityrelative +estimated_duration: 0.02 +category_id: iio-sensors +plugin: resource +environ: IIO_SENSORS +command: + iio_sensor_test.py sensor-resource "$IIO_SENSORS" + +unit: template +template-engine: jinja2 +template-resource: ce-oem-iio-sensors/resource +template-unit: job +template-id: ce-oem-iio-sensors/check_sensor_type_index +id: ce-oem-iio-sensors/check-{{ type }}-{{ index }} +_summary: To test industrial IO {{ type }}-{{ index }} +plugin: shell +user: root +category_id: iio-sensors +estimated_duration: 40s +imports: from com.canonical.plainbox import manifest +requires: manifest.has_iio_sensors == 'True' +flags: also-after-suspend +command: + echo "## Perform the industrial I/O {{ type }}-{{ index }} sensor test" + iio_sensor_test.py test -t {{ type }} -i {{ index }} diff --git a/contrib/checkbox-provider-ce-oem/units/iio-sensors/manifest.pxu b/contrib/checkbox-provider-ce-oem/units/iio-sensors/manifest.pxu new file mode 100644 index 0000000000..e3e36478d9 --- /dev/null +++ b/contrib/checkbox-provider-ce-oem/units/iio-sensors/manifest.pxu @@ -0,0 +1,4 @@ +unit: manifest entry +id: has_iio_sensors +_name: Does platform support industrial IO sensor? 
+value-type: bool diff --git a/contrib/checkbox-provider-ce-oem/units/iio-sensors/test-plan.pxu b/contrib/checkbox-provider-ce-oem/units/iio-sensors/test-plan.pxu new file mode 100644 index 0000000000..ee8c860b75 --- /dev/null +++ b/contrib/checkbox-provider-ce-oem/units/iio-sensors/test-plan.pxu @@ -0,0 +1,42 @@ +id: ce-oem-iio-sensors-full +unit: test plan +_name: Industrial I/O sensor tests +_description: Full tests for Industrial I/O sensors +include: +nested_part: + ce-oem-iio-sensors-manual + ce-oem-iio-sensors-automated + +id: ce-oem-iio-sensors-manual +unit: test plan +_name: Industrial I/O sensor manual tests +_description: Manual tests for Industrial I/O sensors in before suspend and post suspend stage +include: + +id: ce-oem-iio-sensors-automated +unit: test plan +_name: Industrial I/O sensor auto tests +_description: Automated tests for Industrial I/O sensors in before suspend and post suspend stage + # Not nested this test plan for now due to it leads the mismatch job order +bootstrap_include: + ce-oem-iio-sensors/resource +include: + ce-oem-iio-sensors/check_sensor_type_index + +id: before-suspend-ce-oem-iio-sensors-automated +unit: test plan +_name: Industrial I/O sensor auto tests +_description: Automated tests for Industrial I/O sensors in before suspend stage +bootstrap_include: + ce-oem-iio-sensors/resource +include: + ce-oem-iio-sensors/check-.* + +id: after-suspend-ce-oem-iio-sensors-automated +unit: test plan +_name: Industrial I/O sensor auto tests +_description: Automated tests for Industrial I/O sensors in post suspend stage +bootstrap_include: + ce-oem-iio-sensors/resource +include: + after-suspend-ce-oem-iio-sensors/check-.* diff --git a/contrib/checkbox-provider-ce-oem/units/test-plan-ce-oem.pxu b/contrib/checkbox-provider-ce-oem/units/test-plan-ce-oem.pxu index feb7275571..1798632aa7 100644 --- a/contrib/checkbox-provider-ce-oem/units/test-plan-ce-oem.pxu +++ b/contrib/checkbox-provider-ce-oem/units/test-plan-ce-oem.pxu @@ -43,6 +43,7 
@@ nested_part: ce-oem-touchscreen-evdev ce-oem-socketcan-manual com.canonical.certification::led-indicator-manual + ce-oem-iio-sensors-manual id: ce-oem-automated unit: test plan @@ -74,6 +75,7 @@ nested_part: ce-oem-socketcan-stress-automated ce-oem-ethernet-tcp-automated com.canonical.certification::eeprom-automated + before-suspend-ce-oem-iio-sensors-automated com.canonical.certification::rtc-automated id: after-suspend-ce-oem-manual @@ -102,6 +104,7 @@ nested_part: after-suspend-ce-oem-touchscreen-evdev after-suspend-ce-oem-socketcan-manual com.canonical.certification::after-suspend-led-indicator-manual + after-suspend-ce-oem-iio-sensors-manual id: after-suspend-ce-oem-automated unit: test plan @@ -132,6 +135,7 @@ nested_part: after-suspend-ce-oem-ethernet-tcp-automated com.canonical.certification::after-suspend-eeprom-automated com.canonical.certification::after-suspend-rtc-automated + after-suspend-ce-oem-iio-sensors-automated id: ce-oem-stress unit: test plan From bb5ae8ddc223a063a19c0e5807975071a5a16e88 Mon Sep 17 00:00:00 2001 From: LiaoU3 <58060146+LiaoU3@users.noreply.github.com> Date: Wed, 20 Mar 2024 11:05:38 +0800 Subject: [PATCH 101/108] Add MEI test cases and plans from checkbox-iiotg-provider and iiotg provider (New) (#900) * Add MEI test cases from checkbox-iiotg-provider and iiotg provider into base provider --------- Co-authored-by: Pierre Equoy --- providers/base/bin/mei.py | 99 ++++++++++++++++++++++++ providers/base/tests/test_mei.py | 101 +++++++++++++++++++++++++ providers/base/units/mei/category.pxu | 3 + providers/base/units/mei/jobs.pxu | 68 +++++++++++++++++ providers/base/units/mei/manifest.pxu | 4 + providers/base/units/mei/test-plan.pxu | 24 ++++++ providers/iiotg/units/mei/category.pxu | 3 - providers/iiotg/units/mei/jobs.pxu | 32 -------- 8 files changed, 299 insertions(+), 35 deletions(-) create mode 100755 providers/base/bin/mei.py create mode 100644 providers/base/tests/test_mei.py create mode 100644 
providers/base/units/mei/category.pxu create mode 100644 providers/base/units/mei/jobs.pxu create mode 100644 providers/base/units/mei/manifest.pxu create mode 100644 providers/base/units/mei/test-plan.pxu delete mode 100644 providers/iiotg/units/mei/category.pxu delete mode 100644 providers/iiotg/units/mei/jobs.pxu diff --git a/providers/base/bin/mei.py b/providers/base/bin/mei.py new file mode 100755 index 0000000000..dd710e0acb --- /dev/null +++ b/providers/base/bin/mei.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python3 + +import array +import uuid +import fcntl +import struct +import os +from argparse import ArgumentParser + + +DEFAULT_MEI_NODE = "mei0" + + +class MEI_INTERFACE(): + + IOCTL_MEI_CONNECT_CLIENT = 0xc0104801 + + def __init__(self): + self._mei_obj = None + + def _get_mei(self): + path = "/dev" + devices = os.listdir(path) + if DEFAULT_MEI_NODE in devices: + return os.path.join(path, DEFAULT_MEI_NODE) + for device in devices: + if device.find("mei") != -1: + return os.path.join(path, device) + + def open(self): + mei_path = self._get_mei() + if mei_path is None: + raise SystemExit("MEI interface not found") + print("connecting to {}".format(mei_path)) + self._mei_obj = os.open(mei_path, os.O_RDWR) + + def connect(self, str_uuid): + obj_uuid = uuid.UUID(str_uuid) + array_data = array.array("b", obj_uuid.bytes_le) + fcntl.ioctl(self._mei_obj, + self.IOCTL_MEI_CONNECT_CLIENT, + array_data, 1) + max_length, version = struct.unpack("&1 || (echo "Unable retrieve MEI firmware version"; exit 1) diff --git a/providers/base/units/mei/manifest.pxu b/providers/base/units/mei/manifest.pxu new file mode 100644 index 0000000000..40cce5669c --- /dev/null +++ b/providers/base/units/mei/manifest.pxu @@ -0,0 +1,4 @@ +unit: manifest entry +id: has_mei +_name: Has support for Intel Management Engine Interface (MEI) +value-type: bool diff --git a/providers/base/units/mei/test-plan.pxu b/providers/base/units/mei/test-plan.pxu new file mode 100644 index 0000000000..b97137dd23 
--- /dev/null +++ b/providers/base/units/mei/test-plan.pxu @@ -0,0 +1,24 @@ +id: mei-full +_name: Intel Management Engine Interface tests +unit: test plan +include: +nested_part: + mei-manual + mei-automated + + +id: mei-manual +_name: Intel Management Engine Interface tests (manual) +unit: test plan +include: +bootstrap_include: + + +id: mei-automated +_name: Intel Management Engine Interface tests (automated) +unit: test plan +include: + mei/check-module + mei/check-device + mei/get-firmware-version +bootstrap_include: diff --git a/providers/iiotg/units/mei/category.pxu b/providers/iiotg/units/mei/category.pxu deleted file mode 100644 index 70b3ee12fe..0000000000 --- a/providers/iiotg/units/mei/category.pxu +++ /dev/null @@ -1,3 +0,0 @@ -unit: category -id: intel-mei -_name: Intel Management Engine Interface \ No newline at end of file diff --git a/providers/iiotg/units/mei/jobs.pxu b/providers/iiotg/units/mei/jobs.pxu deleted file mode 100644 index 17ffb13da6..0000000000 --- a/providers/iiotg/units/mei/jobs.pxu +++ /dev/null @@ -1,32 +0,0 @@ - -id: mei/ensure-lms-installed -_summary: Ensure the the LMS snap is installed -category_id: intel-mei -requires: - snap.name == 'lms' -flags: fail-on-resource simple -command: true - -id: mei/ensure-lms-service-running -_summary: Test that the LMS service is running -category_id: intel-mei -depends: mei/ensure-lms-installed -command: - snap services lms.lms -flags: simple - -id: mei/get-lms-version -_summary: Test the LMS version can be retrieved from LMS service -category_id: intel-mei -depends: mei/ensure-lms-service-running -command: - dbus-send --system --print-reply --dest=com.intel.amt.lms /com/intel/amt/lms com.intel.amt.lms.PTHI.GetLMSVersion -flags: simple - -id: mei/get-amt-version -_summary: Test the AMT version can be retrieved from LMS service -category_id: intel-mei -depends: mei/ensure-lms-service-running -command: - dbus-send --system --print-reply --dest=com.intel.amt.lms /com/intel/amt/lms 
com.intel.amt.lms.PTHI.GetAMTVersion -flags: simple From 50df261548fad68b8e66a838e5741a7a76092db2 Mon Sep 17 00:00:00 2001 From: rickwu666666 <98441647+rickwu666666@users.noreply.github.com> Date: Wed, 20 Mar 2024 11:16:51 +0800 Subject: [PATCH 102/108] [checkbox-ce-oem] Add thermal test in strict confinement mode(New) (#1084) * Add thermal test in strict confinement mode --- .../test-plan-strict-confinement.pxu | 12 +++++++++++ .../thermal-strict-confinement.pxu | 20 +++++++++++++++++++ 2 files changed, 32 insertions(+) create mode 100644 contrib/checkbox-provider-ce-oem/units/strict-confinement/thermal-strict-confinement.pxu diff --git a/contrib/checkbox-provider-ce-oem/units/strict-confinement/test-plan-strict-confinement.pxu b/contrib/checkbox-provider-ce-oem/units/strict-confinement/test-plan-strict-confinement.pxu index e7a365d10a..f0ee0a77e9 100644 --- a/contrib/checkbox-provider-ce-oem/units/strict-confinement/test-plan-strict-confinement.pxu +++ b/contrib/checkbox-provider-ce-oem/units/strict-confinement/test-plan-strict-confinement.pxu @@ -34,3 +34,15 @@ include: strict-confine/mediacard/sdhc-insert strict-confine/mediacard/sdhc-storage strict-confine/mediacard/sdhc-remove + + +id: strict-confine-thermal +unit: test plan +_name: Test thermal sensor in strict confinement mode. +_desription: + Test thermal sensor in strict ocnfinement mode. + Rely on test-strict-confinment SNAP to test. 
+bootstrap_include: + thermal_zones +include: + strict-confine/temperature-test diff --git a/contrib/checkbox-provider-ce-oem/units/strict-confinement/thermal-strict-confinement.pxu b/contrib/checkbox-provider-ce-oem/units/strict-confinement/thermal-strict-confinement.pxu new file mode 100644 index 0000000000..3e81a731b7 --- /dev/null +++ b/contrib/checkbox-provider-ce-oem/units/strict-confinement/thermal-strict-confinement.pxu @@ -0,0 +1,20 @@ +unit: template +template-engine: jinja2 +template-resource: thermal_zones +template-id: strict-confine/temperature-test +id: strict-confine/temperature_{{ name }}_{{ type }} +_summary: Check Thermal temperature of {{ name }} - {{ type }} +_description: + Test a thermal temperature for {{ name }} - {{ type }}. +category_id: thermal +plugin: shell +estimated_duration: 5m +flags: also-after-suspend +imports: + from com.canonical.certification import snap + from com.canonical.certification import lsb +requires: + lsb.distributor_id == 'Ubuntu Core' + snap.name == 'test-strict-confinement' +command: + test-strict-confinement.thermal-test monitor -n {{ name }} --extra-commands "dd if=/dev/zero of=/dev/null" From 07f2985236d384c24bbf54ed17d657c4da5ae04f Mon Sep 17 00:00:00 2001 From: LiaoU3 <58060146+LiaoU3@users.noreply.github.com> Date: Wed, 20 Mar 2024 11:26:08 +0800 Subject: [PATCH 103/108] Add 'environ' for the job 'graphics/{index}_glmark2-es2_{product_slug}' and change from automated to manual (Bugfix) (#1077) * Add 'environ' for the job 'graphics/{index}_glmark2-es2_{product_slug}' * Change graphics/{index}_glmark2-es2_{product_slug} from automated to manual --- providers/base/units/graphics/jobs.pxu | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/providers/base/units/graphics/jobs.pxu b/providers/base/units/graphics/jobs.pxu index 59745f45d6..1ded351b20 100644 --- a/providers/base/units/graphics/jobs.pxu +++ b/providers/base/units/graphics/jobs.pxu @@ -497,12 +497,19 @@ _description: 
Check NVLINK are supported and NVLINK are connected properly on sy unit: template template-resource: graphics_card -plugin: shell +plugin: user-interact-verify category_id: com.canonical.plainbox::graphics id: graphics/{index}_glmark2-es2_{product_slug} requires: - executable.name == 'glmark2-es2' - 'classic' in environment.SNAP_NAME -command: glmark2-es2 --data-path "$CHECKBOX_RUNTIME"/usr/share/glmark2/ + executable.name == 'glmark2-es2' + 'classic' in environment.SNAP_NAME +environ: + CHECKBOX_RUNTIME +command: + glmark2-es2 --data-path "$CHECKBOX_RUNTIME"/usr/share/glmark2/ estimated_duration: 60.5 _summary: Run OpenGL ES 2.0 X11 benchmark on the {product} video card +_steps: + 1. Commence the test to start rendering +_verification: + Did all the videos and images rendered correctly? From 873cccb241152bd634fd85a7a98c0b77aa250edf Mon Sep 17 00:00:00 2001 From: rickwu666666 <98441647+rickwu666666@users.noreply.github.com> Date: Wed, 20 Mar 2024 15:51:52 +0800 Subject: [PATCH 104/108] [Checkbox-ce-oem] Modify job and script to support use x-test tool(Bugfix) (#1075) * Modify job and script too support use x-test tool * Modify the logic to prioritize the x-test snap * Modify the method to get xtest in gadget snap Modify the method to call python script * Separate look_up_xtest to another script --- .../bin/look_up_xtest.py | 31 +++++++++++ .../bin/parse_optee_test.py | 33 ++++++++---- .../units/optee/jobs.pxu | 54 +++++-------------- 3 files changed, 67 insertions(+), 51 deletions(-) create mode 100755 contrib/checkbox-provider-ce-oem/bin/look_up_xtest.py diff --git a/contrib/checkbox-provider-ce-oem/bin/look_up_xtest.py b/contrib/checkbox-provider-ce-oem/bin/look_up_xtest.py new file mode 100755 index 0000000000..2b05a43550 --- /dev/null +++ b/contrib/checkbox-provider-ce-oem/bin/look_up_xtest.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 + +from checkbox_support.snap_utils.snapd import Snapd +from checkbox_support.snap_utils.system import get_gadget_snap + + +def 
look_up_xtest(): + if Snapd().list("x-test"): + return "x-test.xtest" + elif look_up_gadget() is not False: + return look_up_gadget() + else: + raise SystemExit(1) + + +def look_up_gadget(): + gadget = get_gadget_snap() + snap = Snapd().list(gadget) + if "apps" in snap.keys(): + for app in snap["apps"]: + if app["name"] == "xtest": + return ".".join([app["snap"], app["name"]]) + return False + + +def main(): + print(look_up_xtest()) + + +if __name__ == "__main__": + main() diff --git a/contrib/checkbox-provider-ce-oem/bin/parse_optee_test.py b/contrib/checkbox-provider-ce-oem/bin/parse_optee_test.py index d7778aec06..9c9c307e22 100755 --- a/contrib/checkbox-provider-ce-oem/bin/parse_optee_test.py +++ b/contrib/checkbox-provider-ce-oem/bin/parse_optee_test.py @@ -2,32 +2,45 @@ import json import argparse +from look_up_xtest import look_up_xtest -def parse_json_file(filename, filter=False): - with open(filename, 'r') as file: +def parse_json_file(filepath, filter=False, xtest=None): + with open(filepath, 'r') as file: data = json.load(file) for test in data: - if (filter and test['suite'] == 'pkcs11') or \ - (not filter and test['suite'] not in ['pkcs11']): - print_test_info(test) + if check_suite(test['suite'], filter): + print_test_info(test, xtest) -def print_test_info(test): +def check_suite(suite, filter): + if filter: + return suite == 'pkcs11' + else: + return suite != 'pkcs11' + + +def print_test_info(test, xtest): print("suite: {}".format(test['suite'])) print("test_id: {}".format(test['test_id'])) - print("description: {}\n".format(test['test_description'])) + print("description: {}".format(test['test_description'])) + print("tool: {}\n".format(xtest)) def main(): parser = argparse.ArgumentParser(description='Parse a JSON file.') - parser.add_argument('filename', help='The name of the file to parse.') + parser.add_argument('filepath', + help='The path of the file to parse.') parser.add_argument('-p', '--pkcs11', - help='To filter out PKCS11 for the suite' + 
help='To filter out PKCS11 for the suite.' 'field in JSON.', action='store_true') args = parser.parse_args() - parse_json_file(args.filename, args.pkcs11) + try: + xtest = look_up_xtest() + except SystemExit: + xtest = None + parse_json_file(args.filepath, args.pkcs11, xtest) if __name__ == "__main__": diff --git a/contrib/checkbox-provider-ce-oem/units/optee/jobs.pxu b/contrib/checkbox-provider-ce-oem/units/optee/jobs.pxu index b2a2cfbdd0..ede04a236a 100644 --- a/contrib/checkbox-provider-ce-oem/units/optee/jobs.pxu +++ b/contrib/checkbox-provider-ce-oem/units/optee/jobs.pxu @@ -21,26 +21,18 @@ command: id: ce-oem-optee/xtest-check category_id: optee -_summary: Check if xtest is in the gedget snap. +_summary: Check if xtest is in the system. plugin: shell user: root estimated_duration: 20.0 -environ: OPTEE_TOOL command: - if [[ -z "$OPTEE_TOOL" ]]; then - gadget=$(snap list | awk '$NF == "gadget" {print $1}') - xtest=$(snap info "$gadget"|grep xtest) - if [[ "$xtest" ]]; then - echo -e "\nInfo: Gadget snap support xtest command!" - echo -e "$xtest" - exit 0 - else - echo -e "\nError: Gadget snap not support xtest command!" - echo -e "\nError: Please identify the tool name if needed!" - exit 1 - fi + tool=$(look_up_xtest.py) + exit_status=$? 
+ if [[ "$exit_status" -eq 0 ]]; then + echo "Info: Found xtest runnable $tool" else - echo -e "\nInfo: Using $OPTEE_TOOL to excute OP-TEE test" + echo "Error: Not able to found xtest runnable tool" + exit 1 fi depends: ce-oem-optee/device-node @@ -51,18 +43,13 @@ plugin: shell user: root estimated_duration: 20.0 depends: ce-oem-optee/xtest-check -environ: OPTEE_TOOL command: - tool="" + tool=$(look_up_xtest.py) ta_path="" - if [[ -n "$OPTEE_TOOL" ]]; then - tool="$OPTEE_TOOL" - ta_path="/lib/optee_armtz/" + if [[ "$tool" == "x-test.xtest" ]]; then + ta_path="$(find /var/snap -wholename */lib/optee_armtz)" else - gadget=$(snap list | awk '$NF == "gadget" {print $1}') - # TA will be copied into this path once interface tee-tas connected. - # optee-test snap need to be installed in devmode - tool="$gadget.xtest" + gadget=$(awk -F"." '{ print $1}' <<< "$tool") ta_path="/snap/$gadget/current/lib/optee_armtz/" fi if [[ -z "$(find "$ta_path" -mindepth 1 -type f -o -type d)" ]]; then @@ -121,16 +108,8 @@ category_id: optee estimated_duration: 30 flags: also-after-suspend depends: ce-oem-optee/ta-install -environ: OPTEE_TOOL command: - tool="" - if [[ -n "$OPTEE_TOOL" ]]; then - tool="$OPTEE_TOOL" - else - gadget=$(snap list | awk '$NF == "gadget" {print $1}') - tool="$gadget.xtest" - fi - "$tool" -t {{ suite }} {{ test_id }} + {{ tool }} -t {{ suite }} {{ test_id }} unit: template template-resource: ce-oem-optee-test-list-pkcs11 @@ -146,11 +125,4 @@ estimated_duration: 30 depends: ce-oem-optee/xtest-check flags: also-after-suspend command: - tool="" - if [[ -n "$OPTEE_TOOL" ]]; then - tool="$OPTEE_TOOL" - else - gadget=$(snap list | awk '$NF == "gadget" {print $1}') - tool="$gadget.xtest" - fi - "$tool" -t {{ suite }} {{ test_id }} + {{ tool }} -t {{ suite }} {{ test_id }} From 2827ebfa73dcc27d731977be2cc72120fa7dc342 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Wed, 20 Mar 2024 11:02:41 +0100 Subject: [PATCH 105/108] Fix missing lsb-release build-depends in core16 
snapcraft.yaml (infra) (#1091) Fix missing lsb-release snapcraft.yaml --- checkbox-core-snap/series16/snap/snapcraft.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/checkbox-core-snap/series16/snap/snapcraft.yaml b/checkbox-core-snap/series16/snap/snapcraft.yaml index a9ac69f502..92e8ff8d4d 100644 --- a/checkbox-core-snap/series16/snap/snapcraft.yaml +++ b/checkbox-core-snap/series16/snap/snapcraft.yaml @@ -204,6 +204,7 @@ parts: - python3-xlsxwriter - python3-setuptools - python3-dev + - lsb-release python-packages: - tqdm after: [checkbox-support] From 884c44919500e69fb4c2fc5a7f66df43bae28812 Mon Sep 17 00:00:00 2001 From: Pierre Equoy Date: Wed, 20 Mar 2024 22:50:26 +0800 Subject: [PATCH 106/108] Add 'expand' subcommand to list jobs and templates in a test plan (New) (#1065) * Add 'expand' subcommand to list jobs and templates in a test plan The new subcommand "expands" a given test plan by going through all its sections and returning the pure (non-instantiated) jobs and templates being used. It is different from the "list-boostrapped" subcommand since it does not perform any bootstrap. By default, it outputs the jobs/templates id as text, but it can be called with the `--format json` option to output JSON data with all the known information for said jobs and templates. This is to be used by external tools, for instance to create a document listing information (summary, description...) about executed jobs and templates. Fix CHECKBOX-1263 * Rename plainbox.impl.secure.qualifiers.select_jobs() to select_units() The select_jobs() function can actually be called with not only a list of jobs, but also templates. * Rename IJobQualifier to IUnitQualifier Qualifiers can be used on jobs as well as templates. * Add unit tests * Sort the unit list before returning it Sort the unit list by id/template-id before printing it out. In addition, sort the keys inside each JSON record for easier comparison. 
--- .../checkbox_ng/launcher/checkbox_cli.py | 2 + .../checkbox_ng/launcher/subcommands.py | 89 +++++++++++++++ .../checkbox_ng/launcher/test_subcommands.py | 90 +++++++++++++++ checkbox-ng/plainbox/abc.py | 22 ++-- checkbox-ng/plainbox/impl/applogic.py | 8 +- .../plainbox/impl/secure/qualifiers.py | 105 +++++++++--------- .../plainbox/impl/secure/test_qualifiers.py | 94 ++++++++-------- .../plainbox/impl/session/assistant.py | 16 +-- checkbox-ng/plainbox/impl/session/state.py | 6 +- .../plainbox/impl/session/test_assistant.py | 37 ++++++ .../plainbox/impl/session/test_resume.py | 12 +- .../plainbox/impl/session/test_state.py | 8 ++ 12 files changed, 359 insertions(+), 130 deletions(-) diff --git a/checkbox-ng/checkbox_ng/launcher/checkbox_cli.py b/checkbox-ng/checkbox_ng/launcher/checkbox_cli.py index c16ab3fa63..ce6239fb04 100644 --- a/checkbox-ng/checkbox_ng/launcher/checkbox_cli.py +++ b/checkbox-ng/checkbox_ng/launcher/checkbox_cli.py @@ -37,6 +37,7 @@ StartProvider, Submit, ListBootstrapped, + Expand, TestPlanExport, Show, ) @@ -72,6 +73,7 @@ def main(): "submit": Submit, "show": Show, "list-bootstrapped": ListBootstrapped, + "expand": Expand, "merge-reports": MergeReports, "merge-submissions": MergeSubmissions, "tp-export": TestPlanExport, diff --git a/checkbox-ng/checkbox_ng/launcher/subcommands.py b/checkbox-ng/checkbox_ng/launcher/subcommands.py index b137589a73..e3c84fe7bb 100644 --- a/checkbox-ng/checkbox_ng/launcher/subcommands.py +++ b/checkbox-ng/checkbox_ng/launcher/subcommands.py @@ -19,6 +19,7 @@ Definition of sub-command classes for checkbox-cli """ from argparse import ArgumentTypeError +from argparse import RawDescriptionHelpFormatter from argparse import SUPPRESS from collections import defaultdict from string import Formatter @@ -45,12 +46,14 @@ from plainbox.impl.result import MemoryJobResult from plainbox.impl.runner import slugify from plainbox.impl.secure.sudo_broker import sudo_password_provider +from plainbox.impl.secure.qualifiers 
import select_units from plainbox.impl.session.assistant import SA_RESTARTABLE from plainbox.impl.session.restart import detect_restart_strategy from plainbox.impl.session.storage import WellKnownDirsHelper from plainbox.impl.transport import TransportError from plainbox.impl.transport import get_all_transports from plainbox.impl.transport import SECURE_ID_PATTERN +from plainbox.impl.unit.testplan import TestPlanUnitSupport from checkbox_ng.config import load_configs from checkbox_ng.launcher.stages import MainLoopStage, ReportsStage @@ -1267,6 +1270,92 @@ def __missing__(self, key): print_objs(ctx.args.GROUP, ctx.sa, ctx.args.attrs) +class Expand: + def __init__(self): + self.override_list = [] + + @property + def sa(self): + return self.ctx.sa + + def register_arguments(self, parser): + parser.formatter_class = RawDescriptionHelpFormatter + parser.description = ( + "Expand a given test plan: display all the jobs and templates " + "that are defined in this test plan and that would be executed " + "if ran. This is useful to visualize the full list of jobs and " + "templates for complex test plans that consist of many nested " + "parts with different 'include' and 'exclude' sections.\n\n" + "NOTE: the elements listed here are not sorted by execution " + "order. To see the execution order, please use the " + "'list-bootstrapped' command instead." 
+ ) + parser.add_argument("TEST_PLAN", help=_("test-plan id to expand")) + parser.add_argument( + "-f", + "--format", + type=str, + default="text", + help=_("output format: 'text' or 'json' (default: %(default)s)"), + ) + + def invoked(self, ctx): + self.ctx = ctx + session_title = "checkbox-expand-{}".format(ctx.args.TEST_PLAN) + self.sa.start_new_session(session_title) + tps = self.sa.get_test_plans() + if ctx.args.TEST_PLAN not in tps: + raise SystemExit("Test plan not found") + self.sa.select_test_plan(ctx.args.TEST_PLAN) + all_jobs_and_templates = [ + unit + for unit in self.sa._context.state.unit_list + if unit.unit in ["job", "template"] + ] + tp = self.sa._context._test_plan_list[0] + tp_us = TestPlanUnitSupport(tp) + self.override_list = tp_us.override_list + jobs_and_templates_list = select_units( + all_jobs_and_templates, + [tp.get_mandatory_qualifier()] + [tp.get_qualifier()], + ) + + obj_list = [] + for unit in jobs_and_templates_list: + obj = unit._raw_data.copy() + obj["unit"] = unit.unit + obj["id"] = unit.id # To get the fully qualified id + obj[ + "certification-status" + ] = self.get_effective_certification_status(unit) + if unit.template_id: + obj["template-id"] = unit.template_id + obj_list.append(obj) + obj_list.sort(key=lambda x: x.get("template-id", x["id"])) + if ctx.args.format == "json": + print(json.dumps(obj_list, sort_keys=True)) + else: + for obj in obj_list: + if obj["unit"] == "template": + print("Template '{}'".format(obj["template-id"])) + else: + print("Job '{}'".format(obj["id"])) + + def get_effective_certification_status(self, unit): + if unit.unit == "template": + unit_id = unit.template_id + else: + unit_id = unit.id + for regex, override_field_list in self.override_list: + if re.match(regex, unit_id): + for field, value in override_field_list: + if field == "certification_status": + return value + if hasattr(unit, "certification_status"): + return unit.certification_status + return "unspecified" + + class ListBootstrapped: 
@property def sa(self): diff --git a/checkbox-ng/checkbox_ng/launcher/test_subcommands.py b/checkbox-ng/checkbox_ng/launcher/test_subcommands.py index 7c01e0e715..ede86d5a45 100644 --- a/checkbox-ng/checkbox_ng/launcher/test_subcommands.py +++ b/checkbox-ng/checkbox_ng/launcher/test_subcommands.py @@ -24,7 +24,12 @@ from unittest.mock import patch, Mock, MagicMock from io import StringIO + +from plainbox.impl.unit.job import JobDefinition +from plainbox.impl.unit.template import TemplateUnit + from checkbox_ng.launcher.subcommands import ( + Expand, Launcher, ListBootstrapped, IncompatibleJobError, @@ -682,6 +687,91 @@ def test_invoke_print_output_customized_format(self, stdout): self.assertEqual(stdout.getvalue(), expected_out) +class TestExpand(TestCase): + def setUp(self): + self.launcher = Expand() + self.ctx = Mock() + self.ctx.args = Mock(TEST_PLAN="", format="") + self.ctx.sa = Mock( + start_new_session=Mock(), + get_test_plans=Mock(return_value=["test-plan1", "test-plan2"]), + select_test_plan=Mock(), + # get_resumable_sessions=Mock(return_value=[]), + _context=Mock( + state=Mock( + unit_list=[] + ), + _test_plan_list=[Mock()], + ), + ) + + def test_register_arguments(self): + parser_mock = Mock() + self.launcher.register_arguments(parser_mock) + self.assertTrue(parser_mock.add_argument.called) + + def test_invoke__test_plan_not_found(self): + self.ctx.args.TEST_PLAN = "test-plan3" + + with self.assertRaisesRegex(SystemExit, "Test plan not found"): + self.launcher.invoked(self.ctx) + + @patch("sys.stdout", new_callable=StringIO) + @patch("checkbox_ng.launcher.subcommands.TestPlanUnitSupport") + @patch("checkbox_ng.launcher.subcommands.select_units") + def test_invoke__text(self, mock_select_units, mock_tpus, stdout): + template1 = TemplateUnit({ + "template-id": "test-template", + "id": "test-{res}", + "template-summary": "Test Template Summary", + }) + job1 = JobDefinition({ + "id": "job1", + }) + mock_select_units.return_value = [job1, template1] + 
self.ctx.args.TEST_PLAN = "test-plan1" + self.launcher.invoked(self.ctx) + self.assertIn("Template 'test-template'", stdout.getvalue()) + + @patch("sys.stdout", new_callable=StringIO) + @patch("checkbox_ng.launcher.subcommands.TestPlanUnitSupport") + @patch("checkbox_ng.launcher.subcommands.select_units") + def test_invoke__json(self, mock_select_units, mock_tpus, stdout): + template1 = TemplateUnit({ + "template-id": "test-template", + "id": "test-{res}", + "template-summary": "Test Template Summary", + }) + job1 = JobDefinition({ + "id": "job1", + }) + mock_select_units.return_value = [job1, template1] + self.ctx.args.TEST_PLAN = "test-plan1" + self.ctx.args.format = "json" + self.launcher.invoked(self.ctx) + self.assertIn('"template-id": "test-template"', stdout.getvalue()) + + def test_get_effective_certificate_status(self): + job1 = JobDefinition({ + "id": "job1", + }) + template1 = TemplateUnit({ + "template-id": "template1", + "id": "job-{res}", + }) + self.launcher.override_list = [ + ("^job1$", [("certification_status", "blocker"),]), + ] + self.assertEqual( + self.launcher.get_effective_certification_status(job1), + "blocker" + ) + self.assertEqual( + self.launcher.get_effective_certification_status(template1), + "unspecified" + ) + + class TestUtilsFunctions(TestCase): @patch("checkbox_ng.launcher.subcommands.Colorizer", new=MagicMock()) @patch("builtins.print") diff --git a/checkbox-ng/plainbox/abc.py b/checkbox-ng/plainbox/abc.py index 56b6ac0730..2c74ce7a02 100644 --- a/checkbox-ng/plainbox/abc.py +++ b/checkbox-ng/plainbox/abc.py @@ -287,12 +287,12 @@ def is_hollow(self): """ -class IJobQualifier(metaclass=ABCMeta): +class IUnitQualifier(metaclass=ABCMeta): """ - An opaque qualifier for a job definition. + An opaque qualifier for a unit (job or template). - This is an abstraction for matching jobs definitions to names, patterns and - other means of selecting jobs. 
+ This is an abstraction for matching jobs and templates to names, patterns + and other means of selecting jobs and templates. There are two ways to use a qualifier object. The naive, direct, old API can simply check if a qualifier designates a particular job (if it selects @@ -305,15 +305,15 @@ class IJobQualifier(metaclass=ABCMeta): expressiveness can be preserved. :attr VOTE_EXCLUDE: - (0) vote indicating that a job should *not* be included for + (0) vote indicating that a unit should *not* be included for selection. It overwrites any other votes. :attr VOTE_INCLUDE: - (1) vote indicating that a job should be included for selection. It is + (1) vote indicating that a unit should be included for selection. It is overridden by VOTE_EXCLUDE. :attr VOTE_IGNORE: - (2) vote indicating that a job should neither be included nor excluded + (2) vote indicating that a unit should neither be included nor excluded for selection. This is a neutral value overridden by all other votes. """ @@ -326,13 +326,13 @@ class IJobQualifier(metaclass=ABCMeta): VOTE_IGNORE = 2 @abstractmethod - def get_vote(self, job): + def get_vote(self, unit): """ Get one of the :attr:`VOTE_IGNORE`, :attr:`VOTE_INCLUDE`, :attr:`VOTE_EXCLUDE` votes that this qualifier associated with the - specified job. + specified unit. - :param job: + :param unit: A IJobDefinition instance that is to be visited :returns: one of the ``VOTE_xxx`` constants @@ -346,7 +346,7 @@ def get_primitive_qualifiers(self): Return a list of primitives that constitute this qualifier. :returns: - A list of IJobQualifier objects that each is the smallest, + A list of IUnitQualifier objects that each is the smallest, indivisible entity. 
When each vote cast by those qualifiers is applied sequentially to diff --git a/checkbox-ng/plainbox/impl/applogic.py b/checkbox-ng/plainbox/impl/applogic.py index e53ef631b4..9ebdce8abc 100644 --- a/checkbox-ng/plainbox/impl/applogic.py +++ b/checkbox-ng/plainbox/impl/applogic.py @@ -32,20 +32,20 @@ from plainbox.i18n import gettext as _ from plainbox.impl.result import MemoryJobResult from plainbox.impl.secure import config -from plainbox.impl.secure.qualifiers import select_jobs +from plainbox.impl.secure.qualifiers import select_units from plainbox.impl.session import SessionManager from plainbox.impl.session.jobs import InhibitionCause -# Deprecated, use plainbox.impl.secure.qualifiers.select_jobs() instead +# Deprecated, use plainbox.impl.secure.qualifiers.select_units() instead def get_matching_job_list(job_list, qualifier): """ Get a list of jobs that are designated by the specified qualifier. This is intended to be used with :class:`CompositeQualifier` - but works with any :class:`IJobQualifier` subclass. + but works with any :class:`IUnitQualifier` subclass. 
""" - return select_jobs(job_list, [qualifier]) + return select_units(job_list, [qualifier]) def run_job_if_possible(session, runner, config, job, update=True, ui=None): diff --git a/checkbox-ng/plainbox/impl/secure/qualifiers.py b/checkbox-ng/plainbox/impl/secure/qualifiers.py index 7c4ab1f7cf..fab5a419c8 100644 --- a/checkbox-ng/plainbox/impl/secure/qualifiers.py +++ b/checkbox-ng/plainbox/impl/secure/qualifiers.py @@ -34,7 +34,7 @@ import re import sre_constants -from plainbox.abc import IJobQualifier +from plainbox.abc import IUnitQualifier from plainbox.i18n import gettext as _ from plainbox.impl import pod from plainbox.impl.secure.origin import FileTextSource @@ -45,7 +45,7 @@ _logger = logging.getLogger("plainbox.secure.qualifiers") -class SimpleQualifier(IJobQualifier): +class SimpleQualifier(IUnitQualifier): """ Abstract base class that implements common features of simple (non composite) qualifiers. This allows two concrete subclasses below to @@ -111,7 +111,7 @@ def get_primitive_qualifiers(self): Return a list of primitives that constitute this qualifier. :returns: - A list of IJobQualifier objects that each is the smallest, + A list of IUnitQualifier objects that each is the smallest, indivisible entity. Here it just returns a list of one element, itself. 
@@ -381,7 +381,7 @@ def is_primitive(self): return False def designates(self, job): - return self.get_vote(job) == IJobQualifier.VOTE_INCLUDE + return self.get_vote(job) == IUnitQualifier.VOTE_INCLUDE def get_vote(self, job): """ @@ -403,7 +403,7 @@ def get_vote(self, job): qualifier.get_vote(job) for qualifier in self.qualifier_list]) else: - return IJobQualifier.VOTE_IGNORE + return IUnitQualifier.VOTE_IGNORE def get_primitive_qualifiers(self): return get_flat_primitive_qualifier_list(self.qualifier_list) @@ -413,12 +413,12 @@ def origin(self): raise NonPrimitiveQualifierOrigin -IJobQualifier.register(CompositeQualifier) +IUnitQualifier.register(CompositeQualifier) class NonPrimitiveQualifierOrigin(Exception): """ - Exception raised when IJobQualifier.origin is meaningless as it is being + Exception raised when IUnitQualifier.origin is meaningless as it is being requested on a non-primitive qualifier such as the CompositeQualifier """ @@ -429,26 +429,26 @@ def get_flat_primitive_qualifier_list(qualifier_list): for qual in qualifier_list])) -def select_jobs(job_list, qualifier_list): +def select_units(unit_list, qualifier_list): """ - Select desired jobs. + Select desired units. - :param job_list: - A list of JobDefinition objects + :param unit_list: + A list of units (JobDefinition or TemplateUnit) :param qualifier_list: - A list of IJobQualifier objects. + A list of IUnitQualifier objects. :returns: - A sub-list of JobDefinition objects, selected from job_list. + A sub-list of units, selected from unit_list. """ # Flatten the qualifier list, so that we can see the fine structure of # composite objects. flat_qualifier_list = get_flat_primitive_qualifier_list(qualifier_list) - # Short-circuit if there are no jobs to select. Min is used later and this + # Short-circuit if there are no units to select. Min is used later and this # will allow us to assume that the matrix is not empty. 
if not flat_qualifier_list: return [] # Vote matrix, encodes the vote cast by a particular qualifier for a - # particular job. Visually it's a two-dimensional array like this: + # particular unit. Visually it's a two-dimensional array like this: # # ^ # q | @@ -461,13 +461,13 @@ def select_jobs(job_list, qualifier_list): # e | . # r | # -------------------> - # job + # unit # # The vertical axis represents qualifiers from the flattened qualifier - # list. The horizontal axis represents jobs from job list. Dots represent - # inclusion, X represents exclusion. + # list. The horizontal axis represents units from unit_list. Dots + # represent inclusion, X represents exclusion. # - # The result of the select_jobs() function is a list of jobs that have at + # The result of the select_units() function is a list of units that have at # least one inclusion and no exclusions. The resulting list is ordered by # increasing qualifier index. # @@ -475,45 +475,46 @@ def select_jobs(job_list, qualifier_list): # # The first step iterates over the vote matrix (row-major, meaning that we # visit all columns for each visit of one row) and constructs two - # structures: a set of jobs that got VOTE_INCLUDE and a list of those jobs, - # in the order of discovery. All VOTE_EXCLUDE votes are collected in + # structures: a set of units that got VOTE_INCLUDE and a list of those + # units, in the order of discovery. All VOTE_EXCLUDE votes are collected in # another set. # - # The second step filters-out all items from the excluded job set from the - # selected job list. + # The second step filters-out all items from the excluded unit set from the + # selected unit list. # # The final complexity is O(N x M) + O(M), where N is the number of - # qualifiers (flattened) and M is the number of jobs. The algorithm assumes - # that set lookup is a O(1) operation which is true enough for python. + # qualifiers (flattened) and M is the number of units. 
The algorithm + # assumes that set lookup is a O(1) operation which is true enough for + # python. # # A possible optimization would differentiate qualifiers that may select - # more than one job and fall-back to the current implementation while - # short-circuiting qualifiers that may select at most one job with a + # more than one unit and fall-back to the current implementation while + # short-circuiting qualifiers that may select at most one unit with a # separate set lookup. That would make the algorithm "mostly" linear in the # common case. # # As a separate feature, we might return a list of qualifiers that never # matched anything. That may be helpful for debugging. - # A list is needed to keep the job ordering, while the sets prevent + # A list is needed to keep the unit ordering, while the sets prevent # duplicates. included_list = [] included_set = set() excluded_set = set() - def _handle_vote(qualifier, job): + def _handle_vote(qualifier, unit): """ - Update list and sets of included/excluded jobs based on their related + Update list and sets of included/excluded units based on their related qualifiers. 
""" - vote = qualifier.get_vote(job) - if vote == IJobQualifier.VOTE_INCLUDE: - if job in included_set: + vote = qualifier.get_vote(unit) + if vote == IUnitQualifier.VOTE_INCLUDE: + if unit in included_set: return - included_set.add(job) - included_list.append(job) - elif vote == IJobQualifier.VOTE_EXCLUDE: - excluded_set.add(job) + included_set.add(unit) + included_list.append(unit) + elif vote == IUnitQualifier.VOTE_EXCLUDE: + excluded_set.add(unit) for qualifier in flat_qualifier_list: if (isinstance(qualifier, FieldQualifier) and @@ -521,20 +522,22 @@ def _handle_vote(qualifier, job): isinstance(qualifier.matcher, OperatorMatcher) and qualifier.matcher.op == operator.eq): # optimize the super-common case where a qualifier refers to - # a specific job by using the id_to_index_map to instantly - # perform the requested operation on a single job - for job in job_list: - if job.id == qualifier.matcher.value: - _handle_vote(qualifier, job) + # a specific unit by using the id_to_index_map to instantly + # perform the requested operation on a single unit + for unit in unit_list: + if unit.id == qualifier.matcher.value: + _handle_vote(qualifier, unit) break - elif job.template_id == qualifier.matcher.value: - # the qualifier matches the template id this job has been - # instantiated from, need to get the vote for this job - # based on its template_id field, not its id field + elif unit.template_id == qualifier.matcher.value: + # the qualifier matches the template id information, + # that is either the template id this job has been + # instantiated from, or the template itself. 
Need to get + # the vote for this unit based on its template_id field, + # not its id field qualifier.field = "template_id" - _handle_vote(qualifier, job) + _handle_vote(qualifier, unit) else: - for job in job_list: - _handle_vote(qualifier, job) - return [job for job in included_list - if job not in excluded_set] + for unit in unit_list: + _handle_vote(qualifier, unit) + return [unit for unit in included_list + if unit not in excluded_set] diff --git a/checkbox-ng/plainbox/impl/secure/test_qualifiers.py b/checkbox-ng/plainbox/impl/secure/test_qualifiers.py index 86e09cd023..b08346b467 100644 --- a/checkbox-ng/plainbox/impl/secure/test_qualifiers.py +++ b/checkbox-ng/plainbox/impl/secure/test_qualifiers.py @@ -30,7 +30,7 @@ from unittest import TestCase import operator -from plainbox.abc import IJobQualifier +from plainbox.abc import IUnitQualifier from plainbox.impl.job import JobDefinition from plainbox.impl.secure.origin import FileTextSource from plainbox.impl.secure.origin import Origin @@ -43,23 +43,23 @@ from plainbox.impl.secure.qualifiers import OperatorMatcher from plainbox.impl.secure.qualifiers import PatternMatcher from plainbox.impl.secure.qualifiers import RegExpJobQualifier -from plainbox.impl.secure.qualifiers import select_jobs +from plainbox.impl.secure.qualifiers import select_units from plainbox.impl.secure.qualifiers import SimpleQualifier from plainbox.impl.testing_utils import make_job from plainbox.vendor import mock -class IJobQualifierTests(TestCase): +class IUnitQualifierTests(TestCase): """ - Test cases for IJobQualifier interface + Test cases for IUnitQualifier interface """ - def test_IJobQualifier_is_abstract(self): + def test_IUnitQualifier_is_abstract(self): """ - Verify that IJobQualifier is an interface and cannot be + Verify that IUnitQualifier is an interface and cannot be instantiated """ - self.assertRaises(TypeError, IJobQualifier) + self.assertRaises(TypeError, IUnitQualifier) class DummySimpleQualifier(SimpleQualifier): @@ 
-108,11 +108,11 @@ def test_designates(self): the same job returns VOTE_INCLUDE. """ with mock.patch.object(self.obj, 'get_vote') as mock_get_vote: - mock_get_vote.return_value = IJobQualifier.VOTE_INCLUDE + mock_get_vote.return_value = IUnitQualifier.VOTE_INCLUDE self.assertTrue(self.obj.designates(self.job)) - mock_get_vote.return_value = IJobQualifier.VOTE_EXCLUDE + mock_get_vote.return_value = IUnitQualifier.VOTE_EXCLUDE self.assertFalse(self.obj.designates(self.job)) - mock_get_vote.return_value = IJobQualifier.VOTE_IGNORE + mock_get_vote.return_value = IUnitQualifier.VOTE_IGNORE self.assertFalse(self.obj.designates(self.job)) def test_get_vote__inclusive_matching(self): @@ -124,7 +124,7 @@ def test_get_vote__inclusive_matching(self): with mock.patch.object(obj, 'get_simple_match') as mock_gsm: mock_gsm.return_value = True self.assertEqual(obj.get_vote(self.job), - IJobQualifier.VOTE_INCLUDE) + IUnitQualifier.VOTE_INCLUDE) def test_get_vote__not_inclusive_matching(self): """ @@ -135,7 +135,7 @@ def test_get_vote__not_inclusive_matching(self): with mock.patch.object(obj, 'get_simple_match') as mock_gsm: mock_gsm.return_value = True self.assertEqual(obj.get_vote(self.job), - IJobQualifier.VOTE_EXCLUDE) + IUnitQualifier.VOTE_EXCLUDE) def test_get_vote__inclusive_nonmatching(self): """ @@ -145,7 +145,7 @@ def test_get_vote__inclusive_nonmatching(self): obj = DummySimpleQualifier(self.origin, inclusive=True) with mock.patch.object(obj, 'get_simple_match') as mock_gsm: mock_gsm.return_value = False - self.assertEqual(obj.get_vote(self.job), IJobQualifier.VOTE_IGNORE) + self.assertEqual(obj.get_vote(self.job), IUnitQualifier.VOTE_IGNORE) def test_get_vote__not_inclusive_nonmatching(self): """ @@ -155,7 +155,7 @@ def test_get_vote__not_inclusive_nonmatching(self): obj = DummySimpleQualifier(self.origin, inclusive=False) with mock.patch.object(obj, 'get_simple_match') as mock_gsm: mock_gsm.return_value = False - self.assertEqual(obj.get_vote(self.job), 
IJobQualifier.VOTE_IGNORE) + self.assertEqual(obj.get_vote(self.job), IUnitQualifier.VOTE_IGNORE) def test_get_primitive_qualifiers(self): """ @@ -301,19 +301,19 @@ def test_get_vote(self): self.assertEqual( RegExpJobQualifier("foo", self.origin).get_vote( JobDefinition({'id': 'foo'})), - IJobQualifier.VOTE_INCLUDE) + IUnitQualifier.VOTE_INCLUDE) self.assertEqual( RegExpJobQualifier("foo", self.origin, inclusive=False).get_vote( JobDefinition({'id': 'foo'})), - IJobQualifier.VOTE_EXCLUDE) + IUnitQualifier.VOTE_EXCLUDE) self.assertEqual( RegExpJobQualifier("foo", self.origin).get_vote( JobDefinition({'id': 'bar'})), - IJobQualifier.VOTE_IGNORE) + IUnitQualifier.VOTE_IGNORE) self.assertEqual( RegExpJobQualifier("foo", self.origin, inclusive=False).get_vote( JobDefinition({'id': 'bar'})), - IJobQualifier.VOTE_IGNORE) + IUnitQualifier.VOTE_IGNORE) class JobIdQualifierTests(TestCase): @@ -352,19 +352,19 @@ def test_get_vote(self): self.assertEqual( JobIdQualifier("foo", self.origin).get_vote( JobDefinition({'id': 'foo'})), - IJobQualifier.VOTE_INCLUDE) + IUnitQualifier.VOTE_INCLUDE) self.assertEqual( JobIdQualifier("foo", self.origin, inclusive=False).get_vote( JobDefinition({'id': 'foo'})), - IJobQualifier.VOTE_EXCLUDE) + IUnitQualifier.VOTE_EXCLUDE) self.assertEqual( JobIdQualifier("foo", self.origin).get_vote( JobDefinition({'id': 'bar'})), - IJobQualifier.VOTE_IGNORE) + IUnitQualifier.VOTE_IGNORE) self.assertEqual( JobIdQualifier("foo", self.origin, inclusive=False).get_vote( JobDefinition({'id': 'bar'})), - IJobQualifier.VOTE_IGNORE) + IUnitQualifier.VOTE_IGNORE) def test_smoke(self): """ @@ -402,33 +402,33 @@ def test_get_vote(self): # Default is IGNORE self.assertEqual( CompositeQualifier([]).get_vote(make_job("foo")), - IJobQualifier.VOTE_IGNORE) + IUnitQualifier.VOTE_IGNORE) # Any match is INCLUDE self.assertEqual( CompositeQualifier([ RegExpJobQualifier("foo", self.origin), ]).get_vote(make_job("foo")), - IJobQualifier.VOTE_INCLUDE) + 
IUnitQualifier.VOTE_INCLUDE) # Any negative match is EXCLUDE self.assertEqual( CompositeQualifier([ RegExpJobQualifier("foo", self.origin, inclusive=False), ]).get_vote(make_job("foo")), - IJobQualifier.VOTE_EXCLUDE) + IUnitQualifier.VOTE_EXCLUDE) # Negative matches take precedence over positive matches self.assertEqual( CompositeQualifier([ RegExpJobQualifier("foo", self.origin), RegExpJobQualifier("foo", self.origin, inclusive=False), ]).get_vote(make_job("foo")), - IJobQualifier.VOTE_EXCLUDE) + IUnitQualifier.VOTE_EXCLUDE) # Unrelated patterns are not affecting the result self.assertEqual( CompositeQualifier([ RegExpJobQualifier("foo", self.origin), RegExpJobQualifier("bar", self.origin), ]).get_vote(make_job("foo")), - IJobQualifier.VOTE_INCLUDE) + IUnitQualifier.VOTE_INCLUDE) def test_inclusive(self): """ @@ -494,16 +494,16 @@ class FunctionTests(TestCase): def setUp(self): self.origin = mock.Mock(name='origin', spec_set=Origin) - def test_select_jobs__empty_qualifier_list(self): + def test_select_units__empty_qualifier_list(self): """ - verify that select_jobs() returns an empty list if no qualifiers are + verify that select_units() returns an empty list if no qualifiers are passed """ - self.assertEqual(select_jobs([], []), []) + self.assertEqual(select_units([], []), []) - def test_select_jobs__inclusion(self): + def test_select_units__inclusion(self): """ - verify that select_jobs() honors qualifier ordering + verify that select_units() honors qualifier ordering """ job_a = JobDefinition({'id': 'a'}) job_b = JobDefinition({'id': 'b'}) @@ -514,12 +514,12 @@ def test_select_jobs__inclusion(self): # Regardless of how the list of job is ordered the result # should be the same, depending on the qualifier list self.assertEqual( - select_jobs(job_list, [qual_a, qual_c]), + select_units(job_list, [qual_a, qual_c]), [job_a, job_c]) - def test_select_jobs__exclusion(self): + def test_select_units__exclusion(self): """ - verify that select_jobs() honors qualifier 
ordering + verify that select_units() honors qualifier ordering """ job_a = JobDefinition({'id': 'a'}) job_b = JobDefinition({'id': 'b'}) @@ -534,12 +534,12 @@ def test_select_jobs__exclusion(self): # Regardless of how the list of job is ordered the result # should be the same, depending on the qualifier list self.assertEqual( - select_jobs(job_list, [qual_all, qual_not_c]), + select_units(job_list, [qual_all, qual_not_c]), [job_a, job_b]) - def test_select_jobs__id_field_qualifier(self): + def test_select_units__id_field_qualifier(self): """ - verify that select_jobs() only returns the job that matches a given + verify that select_units() only returns the job that matches a given FieldQualifier """ job_a = JobDefinition({'id': 'a'}) @@ -549,11 +549,11 @@ def test_select_jobs__id_field_qualifier(self): qual = FieldQualifier("id", matcher, self.origin, True) job_list = [job_a, job_b, job_c] expected_list = [job_a] - self.assertEqual(select_jobs(job_list, [qual]), expected_list) + self.assertEqual(select_units(job_list, [qual]), expected_list) - def test_select_jobs__id_field_qualifier_twice(self): + def test_select_units__id_field_qualifier_twice(self): """ - verify that select_jobs() only returns the job that matches a given + verify that select_units() only returns the job that matches a given FieldQualifier once, even if it has been added twice """ job_a = JobDefinition({'id': 'a'}) @@ -561,11 +561,11 @@ def test_select_jobs__id_field_qualifier_twice(self): qual = FieldQualifier("id", matcher, self.origin, True) job_list = [job_a, job_a] expected_list = [job_a] - self.assertEqual(select_jobs(job_list, [qual, qual]), expected_list) + self.assertEqual(select_units(job_list, [qual, qual]), expected_list) - def test_select_jobs__template_id_field_qualifier(self): + def test_select_units__template_id_field_qualifier(self): """ - verify that select_jobs() only returns the jobs that have been + verify that select_units() only returns the jobs that have been instantiated 
using a given template """ job_a = JobDefinition({ @@ -583,9 +583,9 @@ def test_select_jobs__template_id_field_qualifier(self): qual = FieldQualifier("id", matcher, self.origin, True) job_list = [job_a, templated_job_b, templated_job_c] expected_list = [templated_job_b, templated_job_c] - self.assertEqual(select_jobs(job_list, [qual]), expected_list) + self.assertEqual(select_units(job_list, [qual]), expected_list) - def test_select_jobs__excluded_templated_job(self): + def test_select_units__excluded_templated_job(self): """ verify that if a template id is included in the test plan, jobs that have been instantiated from it can still be excluded from the list of @@ -606,4 +606,4 @@ def test_select_jobs__excluded_templated_job(self): job_list = [templated_job_a, templated_job_b] qualifiers = [qual_incl, qual_excl] expected_list = [templated_job_a] - self.assertEqual(select_jobs(job_list, qualifiers), expected_list) + self.assertEqual(select_units(job_list, qualifiers), expected_list) diff --git a/checkbox-ng/plainbox/impl/session/assistant.py b/checkbox-ng/plainbox/impl/session/assistant.py index f7ef380263..7badf2a1e2 100644 --- a/checkbox-ng/plainbox/impl/session/assistant.py +++ b/checkbox-ng/plainbox/impl/session/assistant.py @@ -51,7 +51,7 @@ from plainbox.impl.result import MemoryJobResult from plainbox.impl.runner import JobRunnerUIDelegate from plainbox.impl.secure.origin import Origin -from plainbox.impl.secure.qualifiers import select_jobs +from plainbox.impl.secure.qualifiers import select_units from plainbox.impl.secure.qualifiers import FieldQualifier from plainbox.impl.secure.qualifiers import JobIdQualifier from plainbox.impl.secure.qualifiers import PatternMatcher @@ -802,7 +802,7 @@ def bootstrap(self): # NOTE: there is next-to-none UI here as bootstrap jobs are limited to # just resource jobs (including their dependencies) so there should be # very little UI required. 
- desired_job_list = select_jobs( + desired_job_list = select_units( self._context.state.job_list, [ plan.get_bootstrap_qualifier() @@ -823,7 +823,7 @@ def bootstrap(self): self.use_job_result(job.id, rb.get_result()) # Perform initial selection -- we want to run everything that is # described by the test plan that was selected earlier. - desired_job_list = select_jobs( + desired_job_list = select_units( self._context.state.job_list, [plan.get_qualifier() for plan in self._manager.test_plans] + self._exclude_qualifiers, @@ -866,7 +866,7 @@ def hand_pick_jobs(self, id_patterns: "Iterable[str]"): Origin("hand-pick"), ) ) - jobs = select_jobs(self._context.state.job_list, qualifiers) + jobs = select_units(self._context.state.job_list, qualifiers) self._context.state.update_desired_job_list(jobs) self._metadata.flags = { SessionMetaData.FLAG_INCOMPLETE, @@ -891,7 +891,7 @@ def get_bootstrap_todo_list(self): E.g. to inform the user about the progress """ UsageExpectation.of(self).enforce() - desired_job_list = select_jobs( + desired_job_list = select_units( self._context.state.job_list, [ plan.get_bootstrap_qualifier() @@ -926,7 +926,7 @@ def finish_bootstrap(self): UsageExpectation.of(self).enforce() # Perform initial selection -- we want to run everything that is # described by the test plan that was selected earlier. - desired_job_list = select_jobs( + desired_job_list = select_units( self._context.state.job_list, [plan.get_qualifier() for plan in self._manager.test_plans] + self._exclude_qualifiers @@ -1029,7 +1029,7 @@ def remove_all_filters(self): reigning job selection. 
""" UsageExpectation.of(self).enforce() - desired_job_list = select_jobs( + desired_job_list = select_units( self._context.state.job_list, [plan.get_qualifier() for plan in self._manager.test_plans], ) @@ -1171,7 +1171,7 @@ def get_mandatory_jobs(self) -> "Iterable[str]": test_plan = self._manager.test_plans[0] return [ job.id - for job in select_jobs( + for job in select_units( self._context.state.job_list, [test_plan.get_mandatory_qualifier()], ) diff --git a/checkbox-ng/plainbox/impl/session/state.py b/checkbox-ng/plainbox/impl/session/state.py index 658772d84b..e3a0843b01 100644 --- a/checkbox-ng/plainbox/impl/session/state.py +++ b/checkbox-ng/plainbox/impl/session/state.py @@ -32,7 +32,7 @@ from plainbox.impl.depmgr import DependencyDuplicateError from plainbox.impl.depmgr import DependencyError from plainbox.impl.depmgr import DependencySolver -from plainbox.impl.secure.qualifiers import select_jobs +from plainbox.impl.secure.qualifiers import select_units from plainbox.impl.session.jobs import JobState from plainbox.impl.session.jobs import UndesiredJobReadinessInhibitor from plainbox.impl.session.system_information import( @@ -568,7 +568,7 @@ def _update_mandatory_job_list(self): qualifier_list = [] for test_plan in self._test_plan_list: qualifier_list.append(test_plan.get_mandatory_qualifier()) - mandatory_job_list = select_jobs( + mandatory_job_list = select_units( self.state.job_list, qualifier_list) self.state.update_mandatory_job_list(mandatory_job_list) self.state.update_desired_job_list(self.state.desired_job_list) @@ -762,7 +762,7 @@ def trim_job_list(self, qualifier): :param qualifier: A qualifier that selects jobs to be removed :ptype qualifier: - IJobQualifier + IUnitQualifier :raises ValueError: If any of the jobs selected by the qualifier is on the desired job diff --git a/checkbox-ng/plainbox/impl/session/test_assistant.py b/checkbox-ng/plainbox/impl/session/test_assistant.py index a369955d97..cfbb84a0ce 100644 --- 
a/checkbox-ng/plainbox/impl/session/test_assistant.py +++ b/checkbox-ng/plainbox/impl/session/test_assistant.py @@ -193,3 +193,40 @@ def get_resumable_sessions(): self_mock.get_resumable_sessions.return_value = [session_mock] _ = SessionAssistant.resume_session(self_mock, "session_id") + + @mock.patch("plainbox.impl.session.state.select_units") + @mock.patch("plainbox.impl.unit.testplan.TestPlanUnit") + def test_bootstrap(self, mock_tpu, mock_su, mock_get_providers): + self_mock = mock.MagicMock() + SessionAssistant.bootstrap(self_mock) + # Bootstrapping involves updating the list of desired jobs twice: + # - one time to get the resource jobs + # - one time to generate jobs out of the resource jobs + self.assertEqual( + self_mock._context.state.update_desired_job_list.call_count, + 2 + ) + + @mock.patch("plainbox.impl.session.state.select_units") + def test_hand_pick_jobs(self, mock_su, mock_get_providers): + self_mock = mock.MagicMock() + SessionAssistant.hand_pick_jobs(self_mock, []) + self.assertEqual( + self_mock._context.state.update_desired_job_list.call_count, + 1 + ) + + @mock.patch("plainbox.impl.session.state.select_units") + @mock.patch("plainbox.impl.unit.testplan.TestPlanUnit") + def test_get_bootstrap_todo_list( + self, + mock_tpu, + mock_su, + mock_get_providers + ): + self_mock = mock.MagicMock() + SessionAssistant.get_bootstrap_todo_list(self_mock) + self.assertEqual( + self_mock._context.state.update_desired_job_list.call_count, + 1 + ) diff --git a/checkbox-ng/plainbox/impl/session/test_resume.py b/checkbox-ng/plainbox/impl/session/test_resume.py index 466a4de3e6..87533a5ae6 100644 --- a/checkbox-ng/plainbox/impl/session/test_resume.py +++ b/checkbox-ng/plainbox/impl/session/test_resume.py @@ -31,7 +31,7 @@ import gzip import json -from plainbox.abc import IJobQualifier +from plainbox.abc import IUnitQualifier from plainbox.abc import IJobResult from plainbox.impl.job import JobDefinition from plainbox.impl.resource import Resource @@ -86,13 
+86,13 @@ def test_get_simple_match(self): # removal) self.assertEqual( self.obj.get_vote(JobDefinition({'id': 'foo'})), - IJobQualifier.VOTE_IGNORE) + IUnitQualifier.VOTE_IGNORE) self.assertEqual( self.obj.get_vote(JobDefinition({'id': 'bar'})), - IJobQualifier.VOTE_IGNORE) + IUnitQualifier.VOTE_IGNORE) self.assertEqual( self.obj.get_vote(JobDefinition({'id': 'froz'})), - IJobQualifier.VOTE_IGNORE) + IUnitQualifier.VOTE_IGNORE) # Jobs that are in the retain set are NOT designated self.assertEqual( self.obj.designates(JobDefinition({'id': 'bar'})), False) @@ -103,10 +103,10 @@ def test_get_simple_match(self): # retain set, ids are matched exactly, not by pattern. self.assertEqual( self.obj.get_vote(JobDefinition({'id': 'foobar'})), - IJobQualifier.VOTE_INCLUDE) + IUnitQualifier.VOTE_INCLUDE) self.assertEqual( self.obj.get_vote(JobDefinition({'id': 'fo'})), - IJobQualifier.VOTE_INCLUDE) + IUnitQualifier.VOTE_INCLUDE) class SessionResumeExceptionTests(TestCase): diff --git a/checkbox-ng/plainbox/impl/session/test_state.py b/checkbox-ng/plainbox/impl/session/test_state.py index 702fb822c9..438808ffed 100644 --- a/checkbox-ng/plainbox/impl/session/test_state.py +++ b/checkbox-ng/plainbox/impl/session/test_state.py @@ -1251,3 +1251,11 @@ def test_bulk_override_update(self): } SessionDeviceContext._bulk_override_update(self_mock) self.assertTrue(self_mock._override_update.called) + + @patch("plainbox.impl.session.state.select_units") + @patch("plainbox.impl.unit.testplan.TestPlanUnit.get_mandatory_qualifier") + def test_update_mandatory_job_list(self, mock_gmq, mock_su): + self_mock = MagicMock() + SessionDeviceContext._update_mandatory_job_list(self_mock) + self.assertTrue(self_mock.state.update_mandatory_job_list.called) + self.assertTrue(self_mock.state.update_desired_job_list.called) From 6c2103fd96ca5f187ab3892c93662e1a5aad3c3d Mon Sep 17 00:00:00 2001 From: Pierre Equoy Date: Thu, 21 Mar 2024 15:33:48 +0800 Subject: [PATCH 107/108] Add example to the Template unit 
template-summary field (Infra) (#1097) Add example to the Template unit template-summary field --- docs/reference/units/template.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/reference/units/template.rst b/docs/reference/units/template.rst index 109e99f088..2eafab1e37 100644 --- a/docs/reference/units/template.rst +++ b/docs/reference/units/template.rst @@ -48,6 +48,14 @@ Template-Specific Fields translation into other languages. It must be one line long, ideally it should be short (50-70 characters max). + Example: if a template has a ``summary`` field set to: + + *Test serial connection for port {serial_port}* + + then its ``template-summary`` could be: + + *Test serial connection for every serial port on the system* + This field is optional (Checkbox will only advise you to provide one when running provider validation). From 604d36955eff8cba6ea50071f0aed635a02bfd44 Mon Sep 17 00:00:00 2001 From: Massimiliano Date: Thu, 21 Mar 2024 14:06:26 +0100 Subject: [PATCH 108/108] Fix resume forgetting previous job outcome (bugfix) (#1095) * Use old job result if already set * Subcommand use the old job result if set * Metabox scenario to test this * Clarify docstring --- .../checkbox_ng/launcher/subcommands.py | 9 ++- .../checkbox_ng/launcher/test_subcommands.py | 24 +++++++- .../plainbox/impl/session/remote_assistant.py | 8 ++- .../impl/session/test_remote_assistant.py | 43 +++++++++++++ .../metabox/metabox-provider/units/resume.pxu | 17 ++++++ .../scenarios/restart/agent_respawn.py | 60 ++++++++++++++++++- 6 files changed, 156 insertions(+), 5 deletions(-) diff --git a/checkbox-ng/checkbox_ng/launcher/subcommands.py b/checkbox-ng/checkbox_ng/launcher/subcommands.py index e3c84fe7bb..5fd74bcbdd 100644 --- a/checkbox-ng/checkbox_ng/launcher/subcommands.py +++ b/checkbox-ng/checkbox_ng/launcher/subcommands.py @@ -509,10 +509,15 @@ def _get_autoresume_outcome_last_job(self, metadata): is used to automatically resume a session and assign an outcome to 
the job that interrupted the session. If the interruption is due to a noreturn job (for example, reboot), the job will be marked as passed, - else, if the job made Checkbox crash, it will be marked as crash + else, if the job made Checkbox crash, it will be marked as crash. If + the job has a recorded outcome (so the session was interrupted after + assigning the outcome and before starting a new job) it will be used + instead. """ job_state = self.sa.get_job_state(metadata.running_job_name) - if "noreturn" in (job_state.job.flags or set()): + if job_state.result.outcome: + return job_state.result.outcome + elif "noreturn" in (job_state.job.flags or set()): return IJobResult.OUTCOME_PASS return IJobResult.OUTCOME_CRASH diff --git a/checkbox-ng/checkbox_ng/launcher/test_subcommands.py b/checkbox-ng/checkbox_ng/launcher/test_subcommands.py index ede86d5a45..4796680cbd 100644 --- a/checkbox-ng/checkbox_ng/launcher/test_subcommands.py +++ b/checkbox-ng/checkbox_ng/launcher/test_subcommands.py @@ -343,6 +343,9 @@ def test__get_autoresume_outcome_last_job_noreturn(self): self_mock = MagicMock() job_state = self_mock.sa.get_job_state() job_state.job.flags = "noreturn" + job_state.result.outcome = None + job_state.result.comments = None + metadata_mock = MagicMock() metadata_mock.running_job_name = "running_metadata_job_name" @@ -352,10 +355,13 @@ def test__get_autoresume_outcome_last_job_noreturn(self): self.assertEqual(outcome, IJobResult.OUTCOME_PASS) - def test__get_autoresume_outcome_last_job(self): + def test__get_autoresume_outcome_last_job_crashed(self): self_mock = MagicMock() job_state = self_mock.sa.get_job_state() job_state.job.flags = "" + job_state.result.outcome = None + job_state.result.comments = None + metadata_mock = MagicMock() metadata_mock.running_job_name = "running_metadata_job_name" @@ -365,6 +371,22 @@ def test__get_autoresume_outcome_last_job(self): self.assertEqual(outcome, IJobResult.OUTCOME_CRASH) + def 
test__get_autoresume_outcome_last_job_already_set(self): + self_mock = MagicMock() + job_state = self_mock.sa.get_job_state() + job_state.job.flags = "" + job_state.result.outcome = IJobResult.OUTCOME_PASS + job_state.result.comments = "Pre resume comment" + + metadata_mock = MagicMock() + metadata_mock.running_job_name = "running_metadata_job_name" + + outcome = Launcher._get_autoresume_outcome_last_job( + self_mock, metadata_mock + ) + + self.assertEqual(outcome, IJobResult.OUTCOME_PASS) + def test__resumed_session(self): self_mock = MagicMock() diff --git a/checkbox-ng/plainbox/impl/session/remote_assistant.py b/checkbox-ng/plainbox/impl/session/remote_assistant.py index abf0d35b23..86caca45ba 100644 --- a/checkbox-ng/plainbox/impl/session/remote_assistant.py +++ b/checkbox-ng/plainbox/impl/session/remote_assistant.py @@ -765,7 +765,13 @@ def resume_by_id(self, session_id=None, overwrite_result_dict={}): result_dict["outcome"] = IJobResult.OUTCOME_PASS except (json.JSONDecodeError, FileNotFoundError): the_job = self._sa.get_job(self._last_job) - if the_job.plugin == "shell": + job_state = self._sa.get_job_state(the_job.id) + # the last running job already had a result + if job_state.result.outcome: + result_dict["outcome"] = job_state.result.outcome + result_dict["comments"] = job_state.result.comments or "" + # job didn't have a result, let's automatically calculate it + elif the_job.plugin == "shell": if "noreturn" in the_job.get_flag_set(): result_dict["outcome"] = IJobResult.OUTCOME_PASS result_dict["comments"] = ( diff --git a/checkbox-ng/plainbox/impl/session/test_remote_assistant.py b/checkbox-ng/plainbox/impl/session/test_remote_assistant.py index 06a3491932..f2fe6e1355 100644 --- a/checkbox-ng/plainbox/impl/session/test_remote_assistant.py +++ b/checkbox-ng/plainbox/impl/session/test_remote_assistant.py @@ -229,6 +229,8 @@ def test_resume_by_id_with_result_no_file_noreturn( rsa._sa.get_resumable_sessions.return_value = [resumable_session] 
rsa.get_rerun_candidates.return_value = [] rsa._state = remote_assistant.Idle + job_state = rsa._sa.get_job_state.return_value + job_state.result.outcome = None mock_meta = mock.Mock() mock_meta.app_blob = b'{"testplan_id": "tp_id"}' @@ -267,6 +269,8 @@ def test_resume_by_id_with_result_no_file_normal(self, mock_load_configs): rsa._sa.get_resumable_sessions.return_value = [resumable_session] rsa.get_rerun_candidates.return_value = [] rsa._state = remote_assistant.Idle + job_state = rsa._sa.get_job_state.return_value + job_state.result.outcome = None mock_meta = mock.Mock() mock_meta.app_blob = b'{"launcher": "", "testplan_id": "tp_id"}' @@ -294,6 +298,43 @@ def test_resume_by_id_with_result_no_file_normal(self, mock_load_configs): rsa._sa.use_job_result.assert_called_with(rsa._last_job, mjr, True) + @mock.patch("plainbox.impl.session.remote_assistant.load_configs") + def test_resume_by_id_with_result_no_file_already_set( + self, mock_load_configs + ): + rsa = mock.Mock() + resumable_session = mock.Mock() + resumable_session.id = "session_id" + rsa._sa.get_resumable_sessions.return_value = [resumable_session] + rsa.get_rerun_candidates.return_value = [] + rsa._state = remote_assistant.Idle + job_state = rsa._sa.get_job_state.return_value + job_state.result.outcome = IJobResult.OUTCOME_PASS + job_state.result.comments = None + + mock_meta = mock.Mock() + mock_meta.app_blob = b'{"launcher": "", "testplan_id": "tp_id"}' + + rsa.resume_session.return_value = mock_meta + os_path_exists_mock = mock.Mock() + + rsa._sa.get_job.return_value.plugin = "shell" + + with mock.patch("os.path.exists", os_path_exists_mock): + os_path_exists_mock.return_value = False + rsa._sa.get_job.return_value.get_flag_set.return_value = {} + + remote_assistant.RemoteSessionAssistant.resume_by_id(rsa) + + mjr = MemoryJobResult( + { + "outcome": IJobResult.OUTCOME_PASS, + "comments": "", + } + ) + + rsa._sa.use_job_result.assert_called_with(rsa._last_job, mjr, True) + 
@mock.patch("plainbox.impl.session.remote_assistant.load_configs") def test_resume_by_id_with_result_file_not_json(self, mock_load_configs): rsa = mock.Mock() @@ -302,6 +343,8 @@ def test_resume_by_id_with_result_file_not_json(self, mock_load_configs): rsa._sa.get_resumable_sessions.return_value = [resumable_session] rsa.get_rerun_candidates.return_value = [] rsa._state = remote_assistant.Idle + job_state = rsa._sa.get_job_state.return_value + job_state.result.outcome = None mock_meta = mock.Mock() mock_meta.app_blob = b'{"launcher": "", "testplan_id": "tp_id"}' diff --git a/metabox/metabox/metabox-provider/units/resume.pxu b/metabox/metabox/metabox-provider/units/resume.pxu index f311648191..0b7fb520df 100644 --- a/metabox/metabox/metabox-provider/units/resume.pxu +++ b/metabox/metabox/metabox-provider/units/resume.pxu @@ -16,9 +16,26 @@ command: PID=`pgrep -f checkbox-cli` kill $PID +id: pass-rerun +unit: job +_summary: Test that passes only at the second try +flags: simple +command: + [ -f $PLAINBOX_SESSION_SHARE/will_pass.txt ] && exit 0 + echo "will pass next time" > $PLAINBOX_SESSION_SHARE/will_pass.txt + exit 1 + unit: test plan id: checkbox-crash-then-reboot _name: Checkbox crash then reboot include: checkbox-crasher reboot-emulator + +unit: test plan +id: pass-only-rerun +_name: Pass only on rerun +_summary: Test that passes only at the second try +include: + pass-rerun + basic-shell-failing diff --git a/metabox/metabox/scenarios/restart/agent_respawn.py b/metabox/metabox/scenarios/restart/agent_respawn.py index 53c096813c..1d571503ef 100644 --- a/metabox/metabox/scenarios/restart/agent_respawn.py +++ b/metabox/metabox/scenarios/restart/agent_respawn.py @@ -22,7 +22,8 @@ SelectTestPlan, Send, Expect, - Start + Start, + Signal, ) from metabox.core.scenario import Scenario from metabox.core.utils import tag @@ -102,3 +103,60 @@ class AutoResumeAfterCrashAutoLocal(Scenario): AssertPrinted("job passed"), AssertPrinted("Emulate the reboot"), ] + + 
+@tag("resume", "manual") +class ResumeAfterFinishPreserveOutputLocal(Scenario): + modes = ["local"] + launcher = "# no launcher" + steps = [ + Start(), + Expect("Select test plan"), + SelectTestPlan("2021.com.canonical.certification::pass-only-rerun"), + Send(keys.KEY_ENTER), + Expect("Press (T) to start"), + Send("T"), + Expect("Select jobs to re-run"), + Send(keys.KEY_SPACE), + Expect("[X]"), + Send("r"), + Expect("Select jobs to re-run"), + Signal(keys.SIGINT), + Start(), + Expect("Select jobs to re-run"), + Send("f"), + Expect("job passed"), + Expect("job failed"), + ] + +@tag("resume", "manual") +class ResumeAfterFinishPreserveOutputRemote(Scenario): + modes = ["remote"] + launcher = "# no launcher" + steps = [ + Start(), + Expect("Select test plan"), + SelectTestPlan("2021.com.canonical.certification::pass-only-rerun"), + Send(keys.KEY_ENTER), + Expect("Press (T) to start"), + Send("T"), + Expect("Select jobs to re-run"), + Send(keys.KEY_SPACE), + Expect("[X]"), + Send("r"), + Expect("Select jobs to re-run"), + Signal(keys.SIGINT), + Expect("(X) Nothing"), + Send(keys.KEY_DOWN + keys.KEY_SPACE), + Expect("(X) Stop"), + Send(keys.KEY_DOWN + keys.KEY_SPACE), + Expect("(X) Pause"), + Send(keys.KEY_DOWN + keys.KEY_SPACE), + Expect("(X) Exit"), + Send(keys.KEY_ENTER), + Start(), + Expect("Select jobs to re-run"), + Send("f"), + Expect("job passed"), + Expect("job failed"), + ]