Skip to content

Commit

Permalink
Merge PR #170: Enable settings to be marked as important
Browse files Browse the repository at this point in the history
  • Loading branch information
smarr authored Nov 8, 2021
2 parents 0037596 + bbca98f commit c3715f3
Show file tree
Hide file tree
Showing 8 changed files with 205 additions and 15 deletions.
28 changes: 28 additions & 0 deletions rebench/model/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,34 @@
# IN THE SOFTWARE.


def prefer_important(val, default):
    """Choose between a configured value and its default, honoring '!' markers.

    A missing value (None) falls back to the default. Otherwise, whichever of
    the two carries the importance marker wins, with the explicit value taking
    precedence over an important default only when the value itself is marked.
    When neither is marked, the explicit value is used.
    """
    if val is None:
        return default
    for candidate in (val, default):
        if is_marked_important(candidate):
            return candidate
    return val


def is_marked_important(val):
    """Report whether a setting value carries the '!' importance marker.

    Integer values are already resolved and can never be marked; any other
    value is considered marked when its string form ends with '!' (e.g. "30!").
    """
    if isinstance(val, int):
        return False
    # endswith avoids the IndexError that indexing [-1] raises on an
    # empty string; an empty value is simply not marked as important
    return str(val).endswith("!")


def remove_important(val):
    """Strip a trailing '!' importance marker and return the value as an int.

    None is passed through unchanged, and ints are already resolved and
    returned as-is. String values have any trailing '!' removed before
    being converted to int.
    """
    if val is None:
        return None

    if isinstance(val, int):
        return val

    text = str(val)
    # endswith avoids the IndexError that val[-1] raises on an empty string
    if text.endswith("!"):
        return int(text[:-1])
    return int(text)


def none_or_int(value):
if value:
return int(value)
Expand Down
1 change: 1 addition & 0 deletions rebench/model/benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ def compile(cls, bench, suite, data_store):
codespeed_name = details.get('codespeed_name', None)

run_details = ExpRunDetails.compile(details, suite.run_details)
run_details.resolve_override_and_important()
variables = ExpVariables.compile(details, suite.variables)

return Benchmark(name, command, gauge_adapter, suite,
Expand Down
21 changes: 17 additions & 4 deletions rebench/model/exp_run_details.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,16 +17,16 @@
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from . import none_or_int, none_or_float, none_or_bool
from . import none_or_int, none_or_float, none_or_bool, remove_important, prefer_important


class ExpRunDetails(object):

@classmethod
def compile(cls, config, defaults):
invocations = none_or_int(config.get('invocations', defaults.invocations))
iterations = none_or_int(config.get('iterations', defaults.iterations))
warmup = none_or_int(config.get('warmup', defaults.warmup))
invocations = prefer_important(config.get('invocations'), defaults.invocations)
iterations = prefer_important(config.get('iterations'), defaults.iterations)
warmup = prefer_important(config.get('warmup'), defaults.warmup)

min_iteration_time = none_or_int(config.get('min_iteration_time',
defaults.min_iteration_time))
Expand Down Expand Up @@ -75,6 +75,19 @@ def __init__(self, invocations, iterations, warmup, min_iteration_time,
self.invocations_override = invocations_override
self.iterations_override = iterations_override

def resolve_override_and_important(self):
    """Finalize invocations/iterations/warmup after config composition.

    First applies any command-line overrides, then strips the '!'
    importance markers so the attributes hold plain integer values.
    """
    # command-line overrides take precedence over configured values
    for attr in ('invocations', 'iterations'):
        override = getattr(self, attr + '_override')
        if override is not None:
            setattr(self, attr, override)

    # strip '!' markers now that precedence has been decided
    for attr in ('invocations', 'iterations', 'warmup'):
        setattr(self, attr, remove_important(getattr(self, attr)))

def as_dict(self):
return {
'warmup': self.warmup,
Expand Down
10 changes: 2 additions & 8 deletions rebench/model/run_id.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,17 +67,11 @@ def retries_after_failure(self):

@property
def iterations(self):
    """Number of iterations for this run.

    Overrides and '!' importance markers are already resolved into
    run_details by resolve_override_and_important(), so the stored
    value can be returned directly; the old override check here was
    dead code duplicating that logic.
    """
    return self.benchmark.run_details.iterations

@property
def invocations(self):
    """Number of invocations for this run.

    Overrides and '!' importance markers are already resolved into
    run_details by resolve_override_and_important(), so the stored
    value can be returned directly; the old override check here was
    dead code duplicating that logic.
    """
    return self.benchmark.run_details.invocations

@property
def completed_invocations(self):
Expand Down
9 changes: 6 additions & 3 deletions rebench/rebench-schema.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,19 +5,22 @@ schema;runs_type:
type: map
mapping: &EXP_RUN_DETAILS
invocations:
type: int
type: text
# pattern: \d+!?
# default: 1 # can't specify this here, because the defaults override settings
desc: |
The number of times an executor is invoked to execute a run.
iterations:
type: int
type: text
# pattern: \d+!?
# default: 1 # can't specify this here, because the defaults override settings
desc: |
The number of times a run is executed within an executor
invocation. This needs to be supported by a benchmark harness and
ReBench passes this value on to the harness or benchmark.
warmup:
type: int
type: text
# pattern: \d+!?
desc: |
Consider the first N iterations as warmup and ignore them in ReBench's summary
statistics. Note, they are still persisted in the data file.
Expand Down
53 changes: 53 additions & 0 deletions rebench/tests/features/issue_169.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
benchmark_suites:
Suite1:
command: suite-1 %(benchmark)s %(iterations)s
iterations: 30
gauge_adapter: Time
benchmarks:
- Bench1:
iterations: 40
- Bench2

Suite2:
command: suite-2 %(benchmark)s %(iterations)s
invocations: 3
gauge_adapter: Time
benchmarks:
- Bench3

Suite3:
command: suite-3 %(benchmark)s %(iterations)s
gauge_adapter: Time
benchmarks:
- Bench4

executors:
TestRunner1:
iterations: 10
invocations: 1
path: .
executable: exe-1

TestRunner2:
iterations: 10
invocations: 1
path: .
executable: exe-2

experiments:

Exp1:
suites:
- Suite1
executions:
- TestRunner1
iterations: 20

Exp2:
executions:
- TestRunner2:
suites:
- Suite1
- Suite2
- Suite3
invocations: 2
45 changes: 45 additions & 0 deletions rebench/tests/features/issue_169_config_composition_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
from ..rebench_test_case import ReBenchTestCase
from ...configurator import Configurator, load_config
from ...persistence import DataStore


class Issue169ConfigCompositionTest(ReBenchTestCase):
    """Checks the priority of iterations/invocations settings (issue #169).

    Settings can be given on benchmarks, suites, executors, and experiments;
    a trailing '!' marks a setting as important, giving it precedence.
    """

    def setUp(self):
        super(Issue169ConfigCompositionTest, self).setUp()
        self._set_path(__file__)

        self.cnf = self._load('/issue_169.conf')
        self.runs = self._sorted_runs(self.cnf)

        self.cnf_important = self._load('/issue_169_important.conf')
        self.runs_important = self._sorted_runs(self.cnf_important)

    def _load(self, conf_name):
        # build a configurator for the given config file, selecting all runs
        return Configurator(
            load_config(self._path + conf_name),
            DataStore(self.ui), self.ui, None, 'all')

    @staticmethod
    def _sorted_runs(cnf):
        # deterministic order so the index-based expectations below are stable
        return sorted(cnf.get_runs(), key=lambda run: run.cmdline())

    def _assert(self, run, exe, bench, iterations, invocations):
        self.assertEqual(run.benchmark.suite.executor.name, exe)
        self.assertEqual(run.benchmark.name, bench)
        self.assertEqual(run.iterations, iterations)
        self.assertEqual(run.invocations, invocations)

    def test_confirm_setting_priority(self):
        expected = [
            ("TestRunner1", "Bench1", 40, 1),
            ("TestRunner1", "Bench2", 30, 1),
            ("TestRunner2", "Bench1", 40, 1),
            ("TestRunner2", "Bench2", 30, 1),
            ("TestRunner2", "Bench3", 10, 3),
            ("TestRunner2", "Bench4", 10, 1),
        ]
        for i, exp in enumerate(expected):
            self._assert(self.runs[i], *exp)

    def test_confirm_setting_priority_with_important_settings(self):
        expected = [
            ("TestRunner1", "Bench1", 30, 1),
            ("TestRunner1", "Bench2", 30, 1),
            ("TestRunner2", "Bench1", 30, 2),
            ("TestRunner2", "Bench2", 30, 2),
            ("TestRunner2", "Bench3", 10, 3),
            ("TestRunner2", "Bench4", 10, 2),
        ]
        for i, exp in enumerate(expected):
            self._assert(self.runs_important[i], *exp)
53 changes: 53 additions & 0 deletions rebench/tests/features/issue_169_important.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
benchmark_suites:
Suite1:
command: suite-1 %(benchmark)s %(iterations)s
iterations: 30!
gauge_adapter: Time
benchmarks:
- Bench1:
iterations: 40
- Bench2

Suite2:
command: suite-2 %(benchmark)s %(iterations)s
invocations: 3!
gauge_adapter: Time
benchmarks:
- Bench3

Suite3:
command: suite-3 %(benchmark)s %(iterations)s
gauge_adapter: Time
benchmarks:
- Bench4

executors:
TestRunner1:
iterations: 10!
invocations: 1
path: .
executable: exe-1

TestRunner2:
iterations: 10
invocations: 1
path: .
executable: exe-2

experiments:

Exp1:
suites:
- Suite1
executions:
- TestRunner1
iterations: 20

Exp2:
executions:
- TestRunner2:
suites:
- Suite1
- Suite2
- Suite3
invocations: 2!

0 comments on commit c3715f3

Please sign in to comment.