Skip to content

Commit

Permalink
Combine *args and **kwargs for multiple_run from two different dicts …
Browse files Browse the repository at this point in the history
…into one
  • Loading branch information
ndrwpvlv committed Jun 25, 2021
1 parent bd6c554 commit cc53295
Show file tree
Hide file tree
Showing 6 changed files with 80 additions and 50 deletions.
38 changes: 18 additions & 20 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -37,24 +37,23 @@ from timench import Timench

tmnch = Timench()
repeats = 10
args_dict = { # dict structure: {case_name: [args] of function func(*args), }
'sleep_1s': [1.0, ],
'sleep_2s': [2.0, ],
'sleep_3s': [3.0, ]
env_args = { # dict structure: {case_name: [args, kwargs] of function func(*args, **kwargs), }
'sleep_1s': [[1.0, ], None],
'sleep_2s': [[2.0, ], None],
'sleep_3s': [[3.0, ], None]
}
kwargs_dict = None # dict structure {case_name: {kwargs} of function func(**kwargs)}

for _ in args_dict: # # Add functions to benchmark list
tmnch.add_func(_, time.sleep)
for case_name in env_args: # # Add functions to benchmark list
tmnch.add_func(case_name, time.sleep)
```
Run all benchmarks:
```python
tmnch.multiple_run(repeats, args_dict) # Run multiple benchmarks
tmnch.multiple_run(repeats, env_args) # Run multiple benchmarks
```
Output reports to terminal and txt-file:
```python
for _ in args_dict:
print(tmnch.get_report(_)) # Print to terminal all reports
for case_name in env_args:
print(tmnch.get_report(case_name)) # Print to terminal all reports

tmnch.write_reports('example_2_report.txt') # Write all reports to txt-file
```
Expand Down Expand Up @@ -111,20 +110,19 @@ from timench import Timench

tmnch = Timench()
repeats = 10
args_dict = { # dict structure: {case_name: [args] of function func(*args), }
'sleep_1s': [1.0, ],
'sleep_2s': [2.0, ],
'sleep_3s': [3.0, ]
env_args = { # dict structure: {case_name: [args, kwargs] of function func(*args, **kwargs), }
'sleep_1s': [[1.0, ], None],
'sleep_2s': [[2.0, ], None],
'sleep_3s': [[3.0, ], None]
}
kwargs_dict = None # dict structure {case_name: {kwargs} of function func(**kwargs)}

for _ in args_dict: # # Add functions to benchmark list
tmnch.add_func(_, time.sleep)
for case_name in env_args: # # Add functions to benchmark list
tmnch.add_func(case_name, time.sleep)

tmnch.multiple_run(repeats, args_dict) # Run multiple benchmarks
tmnch.multiple_run(repeats, env_args) # Run multiple benchmarks

for _ in args_dict:
print(tmnch.get_report(_)) # Print to terminal all reports
for case_name in env_args:
print(tmnch.get_report(case_name)) # Print to terminal all reports

tmnch.write_reports('example_2_report.txt') # Write all reports to txt-file
```
Expand Down
15 changes: 7 additions & 8 deletions examples/example_2.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,19 +8,18 @@

tmnch = Timench()
repeats = 10
args_dict = { # dict structure: {case_name: [args] of function func(*args), }
'sleep_1s': [1.0, ],
'sleep_2s': [2.0, ],
'sleep_3s': [3.0, ]
env_args = { # dict structure: {case_name: [args, kwargs] of function func(*args, **kwargs), }
'sleep_1s': [[1.0, ], None],
'sleep_2s': [[2.0, ], None],
'sleep_3s': [[3.0, ], None]
}
kwargs_dict = None # dict structure {case_name: {kwargs} of function func(**kwargs)}

for case_name in args_dict: # # Add functions to benchmark list
for case_name in env_args: # # Add functions to benchmark list
tmnch.add_func(case_name, time.sleep)

tmnch.multiple_run(repeats, args_dict) # Run multiple benchmarks
tmnch.multiple_run(repeats, env_args) # Run multiple benchmarks

for case_name in args_dict:
for case_name in env_args:
print(tmnch.get_report(case_name)) # Print to terminal all reports

tmnch.write_reports('example_2_report.txt') # Write all reports to txt-file
18 changes: 9 additions & 9 deletions examples/example_2_report.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,23 +4,23 @@ TIMENCH REPORT
Results for sleep_1s
---
Function: sleep
Total time = 10.0198 sec
Best loop time = 1.00085 sec
Average loop time = 1.00198 sec
Total time = 10.0421 sec
Best loop time = 1.00063 sec
Average loop time = 1.00421 sec
Repeats = 10

Results for sleep_2s
---
Function: sleep
Total time = 20.0214 sec
Best loop time = 2.00079 sec
Average loop time = 2.00214 sec
Total time = 20.0769 sec
Best loop time = 2.0007 sec
Average loop time = 2.00769 sec
Repeats = 10

Results for sleep_3s
---
Function: sleep
Total time = 30.0395 sec
Best loop time = 3.00125 sec
Average loop time = 3.00395 sec
Total time = 30.1248 sec
Best loop time = 3.00189 sec
Average loop time = 3.01248 sec
Repeats = 10
15 changes: 7 additions & 8 deletions examples/example_4.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,20 +41,19 @@ def first_n_words(text, n):
crop_string_2.__name__: crop_string_2,
first_n_words.__name__: first_n_words,
}
args_dict = { # dict structure: {case_name: [args] of function func(*args), }
crop_string_0.__name__: [string, 200, ],
crop_string_1.__name__: [string, 200, ],
crop_string_2.__name__: [string, 200, ],
first_n_words.__name__: [string, 200, ],
env_args = { # dict structure: {case_name: [args, kwargs] of function func(*args, **kwargs), }
crop_string_0.__name__: [[string, 200, ], None],
crop_string_1.__name__: [[string, 200, ], None],
crop_string_2.__name__: [[string, 200, ], None],
first_n_words.__name__: [[string, 200, ], None],
}
kwargs_dict = None # dict structure {case_name: {kwargs} of function func(**kwargs)}

for case_name in funcs_dict: # # Add functions to benchmark list
tmnch.add_func(case_name, funcs_dict[case_name])

tmnch.multiple_run(repeats, args_dict) # Run multiple benchmarks
tmnch.multiple_run(repeats, env_args) # Run multiple benchmarks

for case_name in args_dict:
for case_name in env_args:
print(tmnch.get_report(case_name)) # Print to terminal all reports

tmnch.write_reports('example_4_report.txt') # Write all reports to txt-file
34 changes: 34 additions & 0 deletions examples/example_4_report.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
TIMENCH REPORT
---

Results for crop_string_0
---
Function: crop_string_0
Total time = 0.917531 sec
Best loop time = 2.8362e-05 sec
Average loop time = 9.17531e-05 sec
Repeats = 10000

Results for crop_string_1
---
Function: crop_string_1
Total time = 0.809906 sec
Best loop time = 2.8198e-05 sec
Average loop time = 8.09906e-05 sec
Repeats = 10000

Results for crop_string_2
---
Function: crop_string_2
Total time = 0.744246 sec
Best loop time = 1.506e-05 sec
Average loop time = 7.44246e-05 sec
Repeats = 10000

Results for first_n_words
---
Function: first_n_words
Total time = 0.647496 sec
Best loop time = 1.1646e-05 sec
Average loop time = 6.47496e-05 sec
Repeats = 10000
10 changes: 5 additions & 5 deletions timench/timench.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,17 +108,17 @@ def run(self, case_name: str, repeats: int, *args, **kwargs):
self.add_results(case_name, times, report)
return report

def multiple_run(self, repeats, args_dict: dict = None, kwargs_dict: dict = None):
def multiple_run(self, repeats, env_args: dict = None):
"""
Batch run for multiple functions
:param repeats: count of repeats as int
:param args_dict: dict with func(*args1) by case name - {case_name1: [args1],}
:param kwargs_dict: dict with func(**kwargs1) by case name - {case_name1: {**kwargs1},}
:param env_args: dict with func(*args1, **kwargs1) by case name - {case_name1: [args, kwargs],}
args = list(...), kwargs = dict(...)
:return: None
"""
for case_name in self.funcs:
self.run(case_name, repeats, *(args_dict.get(case_name) or [] if args_dict else []),
**(kwargs_dict.get(case_name) or {} if kwargs_dict else {}))
args_case, kwargs_case = env_args[case_name] or [[], {}]
self.run(case_name, repeats, *(args_case or []), **(kwargs_case or {}))

@staticmethod
def run_func(func, repeat_count: int = 1, *args, **kwargs):
Expand Down

0 comments on commit cc53295

Please sign in to comment.