# Config file for ReBench
# Config format is YAML (see http://yaml.org/ for detailed spec)
# this run definition will be chosen if no parameters are given to rebench.py
standard_experiment: Test
standard_data_file: 'test.data'
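
# (illustrative usage, assuming the standard command line of this ReBench version:
#  running `./rebench.py rebench.conf` without further arguments would execute the
#  Test experiment defined below and record results in test.data; naming another
#  experiment on the command line overrides standard_experiment)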

# general configuration for runs
runs:
    number_of_data_points: 50
    # min_runtime: 100   # give a warning if average runtime is below this value

# settings for quick runs, useful for fast feedback during experiments
quick_runs:
    number_of_data_points: 3   # set a smaller number of measurements
    max_time: 60               # or a maximum runtime in seconds
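
# (assumption: quick runs are requested via a command-line switch rather than here,
#  e.g. something like `./rebench.py -q rebench.conf`; the exact flag is not part of
#  this file, so consult `./rebench.py --help`)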

# definition of benchmark suites
# settings in the benchmark suite will override similar settings of the VM
benchmark_suites:
    TestSuite1:
        gauge_adapter: Test
        # location: /Users/...
        command: TestBenchMarks %(benchmark)s %(input)s %(variable)s
        input_sizes: [1, 2, 10, 100, 1000]
        benchmarks:
            - Bench1
            - Bench2:
                extra_args: 6
        max_runtime: 300
        variable_values: # another dimension over which the runs are varied
            - val1
            - val2
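
    # (illustrative expansion of the command template above: for Bench2 with input
    #  size 10 and variable val1, the suite command becomes
    #  `TestBenchMarks Bench2 10 val1 6`, assuming extra_args is appended after the
    #  substituted placeholders)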
    TestSuite2:
        gauge_adapter: Test
        command: TestBenchMarks %(benchmark)s %(input)s %(variable)s
        input_sizes: [1, 2, 10, 100, 1000]
        cores: [7, 13, 55]
        benchmarks:
            - Bench1:
                extra_args: 3
            - Bench2
        variable_values: # another dimension over which the runs are varied
            - val1
            - val2
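
# (illustrative run-matrix arithmetic: as defined here, TestSuite2 spans
#  2 benchmarks x 5 input_sizes x 2 variable_values x 3 core counts = 60 runs per VM,
#  each collecting number_of_data_points measurements; experiment-level settings such
#  as input_sizes below can narrow this matrix)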

# VMs have a name and are specified by a path and the binary to be executed
# optional: the number of cores for which the runs have to be executed
virtual_machines:
    TestRunner1:
        path: tests
        binary: test-vm1.py
        cores: [1, 4, 8]
    TestRunner2:
        path: tests
        binary: test-vm2.py
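
# (illustrative, assuming path and binary are joined and prefixed to the suite
#  command: a TestSuite1 run of Bench1 on TestRunner1 would execute roughly
#  `tests/test-vm1.py TestBenchMarks Bench1 1 val1`)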

# define the benchmarks to be executed for a re-executable benchmark run
# special definitions done here should override benchmark suite definitions, and
# VM definitions
experiments:
    Test:
        description: >
            This run definition is used for testing.
            It should try all possible settings, and the generated output
            will be compared to the expected one by the unit test(s).
        benchmark:
            - TestSuite1
            - TestSuite2
        input_sizes: 1
        executions:
            # list of VMs and benchmarks/benchmark suites to be run on them
            # benchmarks defined here override the ones defined for the whole experiment
            # the following example is equivalent to the global run definition,
            # but needs to be tested...
            - TestRunner1:
                benchmark: TestSuite1
            - TestRunner1:
                benchmark: TestSuite2
            - TestRunner2
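            # (note: the bare TestRunner2 entry carries no benchmark restriction, so
            #  it runs both TestSuite1 and TestSuite2 from the experiment-level list
            #  above, which is what makes this listing equivalent to the global
            #  run definition)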