diff --git a/README.md b/README.md
index 2786623..c4d9a5a 100644
--- a/README.md
+++ b/README.md
@@ -37,24 +37,23 @@ from timench import Timench
 tmnch = Timench()
 repeats = 10

-args_dict = { # dict structure: {case_name: [args] of function func(*args), }
-    'sleep_1s': [1.0, ],
-    'sleep_2s': [2.0, ],
-    'sleep_3s': [3.0, ]
+env_args = { # dict structure: {case_name: [args, kwargs] of function func(*args, **kwargs), }
+    'sleep_1s': [[1.0, ], None],
+    'sleep_2s': [[2.0, ], None],
+    'sleep_3s': [[3.0, ], None]
 }
-kwargs_dict = None # dict structure {case_name: {kwargs} of function func(**kwargs)}

-for _ in args_dict: # # Add functions to benchmark list
-    tmnch.add_func(_, time.sleep)
+for case_name in env_args: # Add functions to benchmark list
+    tmnch.add_func(case_name, time.sleep)
 ```

 Run all benchmarks:

 ```python
-tmnch.multiple_run(repeats, args_dict) # Run multiple benchmarks
+tmnch.multiple_run(repeats, env_args) # Run multiple benchmarks
 ```

 Output reports to terminal and txt-file:

 ```python
-for _ in args_dict:
-    print(tmnch.get_report(_)) # Print to terminal all reports
+for case_name in env_args:
+    print(tmnch.get_report(case_name)) # Print to terminal all reports
 tmnch.write_reports('example_2_report.txt') # Write all reports to txt-file
 ```
@@ -111,20 +110,19 @@ from timench import Timench
 tmnch = Timench()
 repeats = 10

-args_dict = { # dict structure: {case_name: [args] of function func(*args), }
-    'sleep_1s': [1.0, ],
-    'sleep_2s': [2.0, ],
-    'sleep_3s': [3.0, ]
+env_args = { # dict structure: {case_name: [args, kwargs] of function func(*args, **kwargs), }
+    'sleep_1s': [[1.0, ], None],
+    'sleep_2s': [[2.0, ], None],
+    'sleep_3s': [[3.0, ], None]
 }
-kwargs_dict = None # dict structure {case_name: {kwargs} of function func(**kwargs)}

-for _ in args_dict: # # Add functions to benchmark list
-    tmnch.add_func(_, time.sleep)
+for case_name in env_args: # Add functions to benchmark list
+    tmnch.add_func(case_name, time.sleep)

-tmnch.multiple_run(repeats, args_dict) # Run multiple benchmarks
+tmnch.multiple_run(repeats, env_args) # Run multiple benchmarks

-for _ in args_dict:
-    print(tmnch.get_report(_)) # Print to terminal all reports
+for case_name in env_args:
+    print(tmnch.get_report(case_name)) # Print to terminal all reports
 tmnch.write_reports('example_2_report.txt') # Write all reports to txt-file
 ```

diff --git a/examples/example_2.py b/examples/example_2.py
index 8075b2b..c777a9c 100644
--- a/examples/example_2.py
+++ b/examples/example_2.py
@@ -8,19 +8,18 @@
 tmnch = Timench()
 repeats = 10

-args_dict = { # dict structure: {case_name: [args] of function func(*args), }
-    'sleep_1s': [1.0, ],
-    'sleep_2s': [2.0, ],
-    'sleep_3s': [3.0, ]
+env_args = { # dict structure: {case_name: [args, kwargs] of function func(*args, **kwargs), }
+    'sleep_1s': [[1.0, ], None],
+    'sleep_2s': [[2.0, ], None],
+    'sleep_3s': [[3.0, ], None]
 }
-kwargs_dict = None # dict structure {case_name: {kwargs} of function func(**kwargs)}

-for case_name in args_dict: # # Add functions to benchmark list
+for case_name in env_args: # Add functions to benchmark list
     tmnch.add_func(case_name, time.sleep)

-tmnch.multiple_run(repeats, args_dict) # Run multiple benchmarks
+tmnch.multiple_run(repeats, env_args) # Run multiple benchmarks

-for case_name in args_dict:
+for case_name in env_args:
     print(tmnch.get_report(case_name)) # Print to terminal all reports
 tmnch.write_reports('example_2_report.txt') # Write all reports to txt-file

diff --git a/examples/example_2_report.txt b/examples/example_2_report.txt
index 8968bbf..9e94d28 100644
--- a/examples/example_2_report.txt
+++ b/examples/example_2_report.txt
@@ -4,23 +4,23 @@ TIMENCH REPORT
 Results for sleep_1s
 ---
 Function: sleep
-Total time = 10.0198 sec
-Best loop time = 1.00085 sec
-Average loop time = 1.00198 sec
+Total time = 10.0421 sec
+Best loop time = 1.00063 sec
+Average loop time = 1.00421 sec
 Repeats = 10

 Results for sleep_2s
 ---
 Function: sleep
-Total time = 20.0214 sec
-Best loop time = 2.00079 sec
-Average loop time = 2.00214 sec
+Total time = 20.0769 sec
+Best loop time = 2.0007 sec
+Average loop time = 2.00769 sec
 Repeats = 10

 Results for sleep_3s
 ---
 Function: sleep
-Total time = 30.0395 sec
-Best loop time = 3.00125 sec
-Average loop time = 3.00395 sec
+Total time = 30.1248 sec
+Best loop time = 3.00189 sec
+Average loop time = 3.01248 sec
 Repeats = 10
diff --git a/examples/example_4.py b/examples/example_4.py
index b1e0fe7..2a017b9 100644
--- a/examples/example_4.py
+++ b/examples/example_4.py
@@ -41,20 +41,19 @@ def first_n_words(text, n):
     crop_string_2.__name__: crop_string_2,
     first_n_words.__name__: first_n_words,
 }
-args_dict = { # dict structure: {case_name: [args] of function func(*args), }
-    crop_string_0.__name__: [string, 200, ],
-    crop_string_1.__name__: [string, 200, ],
-    crop_string_2.__name__: [string, 200, ],
-    first_n_words.__name__: [string, 200, ],
+env_args = { # dict structure: {case_name: [args, kwargs] of function func(*args, **kwargs), }
+    crop_string_0.__name__: [[string, 200, ], None],
+    crop_string_1.__name__: [[string, 200, ], None],
+    crop_string_2.__name__: [[string, 200, ], None],
+    first_n_words.__name__: [[string, 200, ], None],
 }
-kwargs_dict = None # dict structure {case_name: {kwargs} of function func(**kwargs)}

 for case_name in funcs_dict: # # Add functions to benchmark list
     tmnch.add_func(case_name, funcs_dict[case_name])

-tmnch.multiple_run(repeats, args_dict) # Run multiple benchmarks
+tmnch.multiple_run(repeats, env_args) # Run multiple benchmarks

-for case_name in args_dict:
+for case_name in env_args:
     print(tmnch.get_report(case_name)) # Print to terminal all reports
 tmnch.write_reports('example_4_report.txt') # Write all reports to txt-file

diff --git a/examples/example_4_report.txt b/examples/example_4_report.txt
new file mode 100644
index 0000000..42aee9b
--- /dev/null
+++ b/examples/example_4_report.txt
@@ -0,0 +1,34 @@
+TIMENCH REPORT
+---
+
+Results for crop_string_0
+---
+Function: crop_string_0
+Total time = 0.917531 sec
+Best loop time = 2.8362e-05 sec
+Average loop time = 9.17531e-05 sec
+Repeats = 10000
+
+Results for crop_string_1
+---
+Function: crop_string_1
+Total time = 0.809906 sec
+Best loop time = 2.8198e-05 sec
+Average loop time = 8.09906e-05 sec
+Repeats = 10000
+
+Results for crop_string_2
+---
+Function: crop_string_2
+Total time = 0.744246 sec
+Best loop time = 1.506e-05 sec
+Average loop time = 7.44246e-05 sec
+Repeats = 10000
+
+Results for first_n_words
+---
+Function: first_n_words
+Total time = 0.647496 sec
+Best loop time = 1.1646e-05 sec
+Average loop time = 6.47496e-05 sec
+Repeats = 10000
diff --git a/timench/timench.py b/timench/timench.py
index 681e066..11c0759 100644
--- a/timench/timench.py
+++ b/timench/timench.py
@@ -108,17 +108,17 @@ def run(self, case_name: str, repeats: int, *args, **kwargs):
         self.add_results(case_name, times, report)
         return report

-    def multiple_run(self, repeats, args_dict: dict = None, kwargs_dict: dict = None):
+    def multiple_run(self, repeats, env_args: dict = None):
         """
         Batch run for multiple functions
         :param repeats: count of repeats as int
-        :param args_dict: dict with func(*args1) by case name - {case_name1: [args1],}
-        :param kwargs_dict: dict with func(**kwargs1) by case name - {case_name1: {**kwargs1},}
+        :param env_args: dict of [args, kwargs] pairs by case name - {case_name1: [args1, kwargs1],}
+        where args is a list, kwargs is a dict, and either slot may be None
         :return: None
         """
         for case_name in self.funcs:
-            self.run(case_name, repeats, *(args_dict.get(case_name) or [] if args_dict else []),
-                     **(kwargs_dict.get(case_name) or {} if kwargs_dict else {}))
+            args_case, kwargs_case = (env_args or {}).get(case_name) or [[], {}]
+            self.run(case_name, repeats, *(args_case or []), **(kwargs_case or {}))

     @staticmethod
     def run_func(func, repeat_count: int = 1, *args, **kwargs):
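
A minimal usage sketch of the new `env_args` convention, including a case that actually passes keyword arguments (the examples in this diff only fill the args slot and leave kwargs as `None`). The `wait` helper and the case names are hypothetical; the `Timench` calls are the ones shown in the diff:

```python
import time

from timench import Timench


def wait(duration=0.0):
    """Hypothetical target function, giving the kwargs slot a named parameter to bind to."""
    time.sleep(duration)


tmnch = Timench()
repeats = 3

env_args = {  # each case maps to [args, kwargs]; either slot may be None
    'wait_positional': [[0.25], None],           # runs wait(0.25)
    'wait_keyword': [None, {'duration': 0.25}],  # runs wait(duration=0.25)
}

for case_name in env_args:  # register one benchmark case per entry
    tmnch.add_func(case_name, wait)

tmnch.multiple_run(repeats, env_args)  # run every registered case

for case_name in env_args:
    print(tmnch.get_report(case_name))
```

Packing positional and keyword arguments into a single `[args, kwargs]` pair per case replaces the parallel `args_dict`/`kwargs_dict` parameters, so a case can no longer fall out of sync between two dicts.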