diff --git a/rebench/configurator.py b/rebench/configurator.py
index 6b6392e1..e389860d 100644
--- a/rebench/configurator.py
+++ b/rebench/configurator.py
@@ -294,6 +294,7 @@ def get_executor(self, executor_name, run_details, variables, action):
         executor = Executor.compile(
             executor_name, self._executors[executor_name],
             run_details, variables, self.build_commands, action)
+        return executor
 
     def get_suite(self, suite_name):
diff --git a/rebench/model/executor.py b/rebench/model/executor.py
index f03794e3..4b30c86a 100644
--- a/rebench/model/executor.py
+++ b/rebench/model/executor.py
@@ -18,6 +18,7 @@
 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 # IN THE SOFTWARE.
 import os
+import subprocess
 
 from .build_cmd import BuildCommand
 from .exp_run_details import ExpRunDetails
@@ -26,8 +27,7 @@
 from ..configuration_error import ConfigurationError
 
 
-class Executor(object):
-
+class Executor:
     @classmethod
     def compile(cls, executor_name, executor, run_details, variables, build_commands, action):
         path = executor.get('path')
@@ -35,50 +35,69 @@ def compile(cls, executor_name, executor, run_details, variables, build_commands
             path = os.path.abspath(path)
         executable = executor.get('executable')
         args = executor.get('args')
+        version_command = executor.get('version_command')
+        version_string = executor.get('version_string')
+        version_git = executor.get('version_git')
 
         build = BuildCommand.create_commands(executor.get('build'), build_commands, path)
-
         description = executor.get('description')
         desc = executor.get('desc')
         env = executor.get('env')
-
         profiler = Profiler.compile(executor.get('profiler'))
-
         run_details = ExpRunDetails.compile(executor, run_details)
         variables = ExpVariables.compile(executor, variables)
 
         if action == "profile" and len(profiler) == 0:
-            raise ConfigurationError("Executor " + executor_name + " is configured for profiling, "
-                                     + "but no profiler details are given.")
+            raise ConfigurationError(f"Executor {executor_name} is configured for profiling, "
+                                     "but no profiler details are given.")
 
-        return Executor(executor_name, path, executable, args, build, description or desc,
-                        profiler, run_details, variables, action, env)
+        return Executor(executor_name, path, executable, args,
+                        version_command, version_string, version_git, build,
+                        description or desc, profiler, run_details, variables, action, env)
 
-    def __init__(self, name, path, executable, args, build, description,
+    def __init__(self, name, path, executable, args,
+                 version_command, version_string, version_git, build, description,
                  profiler, run_details, variables, action, env):
         """Specializing the executor details in the run definitions with the settings from
-        the executor definitions
         """
         self.name = name
         self.path = path
         self.executable = executable
         self.args = args
-
+        self.version_command = version_command
+        self.version_string = version_string
+        self.version_git = version_git
         self.build = build
         self.description = description
         self.profiler = profiler
-
         self.run_details = run_details
         self.variables = variables
         self.env = env
-
         self.action = action
 
+    def get_version(self):
+        if self.version_command:
+            try:
+                result = subprocess.run(self.version_command, shell=True,
+                                        check=True, capture_output=True, text=True)
+                return result.stdout.strip()
+            except subprocess.CalledProcessError as e:
+                return e.stderr.strip()
+        elif self.version_string:
+            return self.version_string
+        elif self.version_git:
+            try:
+                result = subprocess.run(self.version_git, shell=True,
+                                        check=True, capture_output=True, text=True)
+                return result.stdout.strip()
+            except subprocess.CalledProcessError as e:
+                return e.stderr.strip()
+        else:
+            return None
+
     def as_dict(self):
-        result = {
-            'name': self.name,
-            'desc': self.description
-        }
+        result = {'name': self.name, 'desc': self.description}
         if self.build:
             result['build'] = [b.as_dict() for b in self.build]
 
         return result
diff --git a/rebench/rebench-schema.yml b/rebench/rebench-schema.yml
index 7c6669ed..74a7dcf9 100644
--- a/rebench/rebench-schema.yml
+++ b/rebench/rebench-schema.yml
@@ -277,6 +277,18 @@ schema;executor_type:
                             type: str
                             desc: Argument given to `perf` when processing the recording
                             default: report -g graph --no-children --stdio
+        version_command:
+            type: str
+            required: false
+            desc: Command to retrieve the version of the executable.
+        version_string:
+            type: str
+            required: false
+            desc: Explicit version string provided by the user.
+        version_git:
+            type: str
+            required: false
+            desc: Command to retrieve the Git version of the executable.
 
 schema;exp_suite_type:
     desc: A list of suites
diff --git a/rebench/tests/bugs/issue_4_run_equality_and_params_test.py b/rebench/tests/bugs/issue_4_run_equality_and_params_test.py
index 0126c2d0..e78a102f 100644
--- a/rebench/tests/bugs/issue_4_run_equality_and_params_test.py
+++ b/rebench/tests/bugs/issue_4_run_equality_and_params_test.py
@@ -36,7 +36,7 @@ def setUp(self):
     @staticmethod
     def _create_template_run_id():
         executor = Executor('MyVM', 'foo_bar_path', 'foo_bar_bin',
-                            None, None, None, None, None, None, "benchmark", {})
+                            None, None, None, None, None, None, None, None, None, "benchmark", {})
         suite = BenchmarkSuite("MySuite", executor, '',
                                '%(benchmark)s %(cores)s %(input)s', None, None, [], None, None, None)
         benchmark = Benchmark("TestBench", "TestBench", None, suite, None,
@@ -46,7 +46,7 @@ def _create_template_run_id():
     @staticmethod
     def _create_hardcoded_run_id():
         executor = Executor('MyVM', 'foo_bar_path', 'foo_bar_bin',
-                            None, None, None, None, None, None, "benchmark", {})
+                            None, None, None, None, None, None, None, None, None, "benchmark", {})
         suite = BenchmarkSuite('MySuite', executor, '',
                                '%(benchmark)s %(cores)s 2 3', None, None, [], None, None, None)
         benchmark = Benchmark("TestBench", "TestBench", None, suite,
diff --git a/rebench/tests/executor_test.py b/rebench/tests/executor_test.py
index a7e62583..c0dc7d6c 100644
--- a/rebench/tests/executor_test.py
+++ b/rebench/tests/executor_test.py
@@ -19,17 +19,18 @@
 # IN THE SOFTWARE.
 import unittest
 import os
+import subprocess
 
+from ..model.executor import Executor as RebenchExecutor
 from .persistence import TestPersistence
 from .rebench_test_case import ReBenchTestCase
-from ..rebench import ReBench
-from ..executor import Executor, BatchScheduler, RandomScheduler, RoundRobinScheduler
-from ..configurator import Configurator, load_config
+from ..rebench import ReBench
+from ..executor import Executor, BatchScheduler, RandomScheduler, RoundRobinScheduler
+from ..configurator import Configurator, load_config
 from ..model.measurement import Measurement
-from ..persistence import DataStore
+from ..persistence import DataStore
 from ..ui import UIError
-from ..reporter import Reporter
-
+from ..reporter import Reporter
 
 
 class ExecutorTest(ReBenchTestCase):
@@ -219,6 +220,99 @@ def test_determine_exp_name_and_filters_only_others(self):
         self.assertEqual(exp_name, None)
         self.assertEqual(exp_filter, ['e:bar', 's:b'])
 
+    def test_version_command(self):
+        executor = RebenchExecutor(
+            "TestExecutor", None, None, None, "python --version",
+            None, None, None, None, None, None, None, None, None
+        )
+
+        try:
+            result = subprocess.run(
+                executor.version_command, shell=True, check=True,
+                stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
+            )
+            version_output = result.stdout.strip()
+        except subprocess.CalledProcessError as e:
+            version_output = e.stderr.strip()
+        self.assertTrue("Python" in version_output)
+
+    def test_version_command_in_config(self):
+        cnf = Configurator(load_config(self._path + '/small_with_version.conf'),
+                           DataStore(self.ui),
+                           self.ui, None, data_file=self._tmp_file)
+        runs = cnf.get_runs()
+        executor = list(runs)[0].benchmark.suite.executor
+
+        self.assertEqual(executor.version_command, "python --version")
+
+        try:
+            result = subprocess.run(
+                executor.version_command, shell=True, check=True,
+                stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
+            )
+            version_output = result.stdout.strip()
+        except subprocess.CalledProcessError as e:
+            version_output = e.stderr.strip()
+
+        self.assertTrue("Python" in version_output)
+
+    def test_version_string(self):
+        executor = RebenchExecutor(
+            "TestExecutor", None, None, None, None, "7.42",
+            None, None, None, None, None, None, None, None
+        )
+
+        version_output = executor.version_string
+        self.assertTrue("7.42" in version_output)
+
+    def test_version_string_in_config(self):
+        cnf = Configurator(load_config(self._path + '/small_with_version.conf'),
+                           DataStore(self.ui),
+                           self.ui, None, data_file=self._tmp_file)
+        runs = cnf.get_runs()
+        executor = list(runs)[0].benchmark.suite.executor
+
+        self.assertEqual(executor.version_string, "7.42")
+
+        version_output = executor.version_string
+        self.assertTrue("7.42" in version_output)
+
+    def test_version_git(self):
+        executor = RebenchExecutor(
+            "TestExecutor", None, None, None, None, None,
+            "git rev-parse HEAD", None, None, None, None, None, None, None
+        )
+
+        try:
+            result = subprocess.run(
+                executor.version_git, shell=True, check=True,
+                stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
+            )
+            version_output = result.stdout.strip()
+        except subprocess.CalledProcessError as e:
+            version_output = e.stderr.strip()
+        self.assertTrue(len(version_output) > 0)
+
+    def test_version_git_in_config(self):
+        cnf = Configurator(load_config(self._path + '/small_with_version.conf'),
+                           DataStore(self.ui),
+                           self.ui, None, data_file=self._tmp_file)
+        runs = cnf.get_runs()
+        executor = list(runs)[0].benchmark.suite.executor
+
+        self.assertEqual(executor.version_git, "git rev-parse HEAD")
+
+        try:
+            result = subprocess.run(
+                executor.version_git, shell=True, check=True,
+                stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
+            )
+            version_output = result.stdout.strip()
+        except subprocess.CalledProcessError as e:
+            version_output = e.stderr.strip()
+
+        self.assertTrue(len(version_output) > 0)
+
 
 class _TestReporter(Reporter):
     __test__ = False  # This is not a test class
diff --git a/rebench/tests/persistency.conf b/rebench/tests/persistency.conf
index 99dbaec5..a082e5d5 100644
--- a/rebench/tests/persistency.conf
+++ b/rebench/tests/persistency.conf
@@ -3,7 +3,7 @@
 
 # this run definition will be chosen if no parameters are given to rebench.py
 default_experiment: Test
-default_data_file: 'persistency.data'
+default_data_file: 'persistency.data'
 
 reporting:
     codespeed:
diff --git a/rebench/tests/persistency_test.py b/rebench/tests/persistency_test.py
index 6ad6425b..9d53f575 100644
--- a/rebench/tests/persistency_test.py
+++ b/rebench/tests/persistency_test.py
@@ -45,7 +45,9 @@ class PersistencyTest(ReBenchTestCase):
     def test_de_serialization(self):
         data_store = DataStore(self.ui)
         executor = ExecutorConf("MyVM", '', '',
-                                None, None, None, None, None, None, "benchmark", {})
+                                None, None, None, None,
+                                None, None, None, None,
+                                None, "benchmark", {})
         suite = BenchmarkSuite("MySuite", executor, '', '',
                                None, None, None, None, None, None)
         benchmark = Benchmark("Test Bench [>", "Test Bench [>", None,
diff --git a/rebench/tests/small_with_version.conf b/rebench/tests/small_with_version.conf
new file mode 100644
index 00000000..1265f6f1
--- /dev/null
+++ b/rebench/tests/small_with_version.conf
@@ -0,0 +1,34 @@
+# Config file for ReBench
+# Config format is YAML (see http://yaml.org/ for detailed spec)
+
+# this run definition will be chosen if no parameters are given to rebench.py
+default_experiment: Test
+default_data_file: 'small.data'
+
+# general configuration for runs
+runs:
+    invocations: 10
+    retries_after_failure: 3
+
+benchmark_suites:
+    Suite:
+        gauge_adapter: TestExecutor
+        command: TestBenchMarks ~/suiteFolder/%(benchmark)s
+        benchmarks:
+            - Bench1
+            - Bench2
+
+executors:
+    TestRunner1:
+        path: ~/PycharmProjects/ReBench/rebench/tests
+        executable: test-vm1.py
+        version_command: "python --version"
+        version_string: "7.42"
+        version_git: "git rev-parse HEAD"
+
+experiments:
+    Test:
+        suites:
+            - Suite
+        executions:
+            - TestRunner1
\ No newline at end of file