diff --git a/fitbenchmarking/cli/checkpoint_handler.py b/fitbenchmarking/cli/checkpoint_handler.py
index eabdfb9d6..8727c4812 100644
--- a/fitbenchmarking/cli/checkpoint_handler.py
+++ b/fitbenchmarking/cli/checkpoint_handler.py
@@ -153,7 +153,7 @@ def generate_report(options_file="", additional_options=None, debug=False):
     )
 
     checkpoint = Checkpoint(options=options)
-    results, unselected_minimizers, failed_problems = checkpoint.load()
+    results, unselected_minimizers, failed_problems, config = checkpoint.load()
 
     all_dirs = []
     pp_dfs_all_prob_sets = {}
@@ -164,6 +164,7 @@ def generate_report(options_file="", additional_options=None, debug=False):
             options=options,
             failed_problems=failed_problems[label],
             unselected_minimizers=unselected_minimizers[label],
+            config=config,
         )
 
         pp_dfs_all_prob_sets[label] = pp_dfs
diff --git a/fitbenchmarking/cli/main.py b/fitbenchmarking/cli/main.py
index e98911d5a..55b740755 100755
--- a/fitbenchmarking/cli/main.py
+++ b/fitbenchmarking/cli/main.py
@@ -12,6 +12,8 @@
 import sys
 from tempfile import NamedTemporaryFile
 
+import numpy as np
+
 import fitbenchmarking
 from fitbenchmarking.cli.checkpoint_handler import generate_report
 from fitbenchmarking.cli.exception_handler import exception_handler
@@ -382,6 +384,14 @@ def run(problem_sets, additional_options=None, options_file="", debug=False):
     group_labels = []
     result_dir = []
     pp_dfs_all_prob_sets = {}
+    config = {
+        "python_version": (
+            f"{sys.version_info.major}."
+            f"{sys.version_info.minor}."
+            f"{sys.version_info.micro}"
+        ),
+        "numpy_version": np.__version__,
+    }
     cp = Checkpoint(options=options)
 
     for sub_dir in problem_sets:
@@ -441,6 +451,7 @@ def run(problem_sets, additional_options=None, options_file="", debug=False):
             options=options,
             failed_problems=failed_problems,
            unselected_minimizers=unselected_minimizers,
+            config=config,
        )
 
         pp_dfs_all_prob_sets[label] = pp_dfs
diff --git a/fitbenchmarking/core/results_output.py b/fitbenchmarking/core/results_output.py
index 2c4697e89..f823e662b 100644
--- a/fitbenchmarking/core/results_output.py
+++ b/fitbenchmarking/core/results_output.py
@@ -45,7 +45,12 @@
 
 @write_file
 def save_results(
-    options, results, group_name, failed_problems, unselected_minimizers
+    options,
+    results,
+    group_name,
+    failed_problems,
+    unselected_minimizers,
+    config,
 ):
     """
     Create all results files and store them.
@@ -61,8 +66,10 @@ def save_results(
         html output
     :type failed_problems: list
     :params unselected_minimizers: Dictionary containing unselected minimizers
-    based on the algorithm_type option
+        based on the algorithm_type option
     :type unselected_minimizers: dict
+    :params config: Dictionary containing env config
+    :type config: dict
 
     :return: Path to directory of group results, data for building the
         performance profile plots
@@ -102,6 +109,7 @@ def save_results(
         pp_locations=pp_locations,
         failed_problems=failed_problems,
         unselected_minimzers=unselected_minimizers,
+        config=config,
     )
 
     create_problem_level_index(
diff --git a/fitbenchmarking/results_processing/tables.py b/fitbenchmarking/results_processing/tables.py
index 0b2042aef..c96c40300 100644
--- a/fitbenchmarking/results_processing/tables.py
+++ b/fitbenchmarking/results_processing/tables.py
@@ -43,6 +43,7 @@ def create_results_tables(
     pp_locations,
     failed_problems,
     unselected_minimzers,
+    config,
 ):
     """
     Saves the results of the fitting to html/csv tables.
@@ -66,6 +67,8 @@ def create_results_tables(
     :params unselected_minimzers: Dictionary containing unselected minimizers
         based on the algorithm_type option
     :type unselected_minimzers: dict
+    :params config: Dictionary containing env config.
+    :type config: dict
 
     :return: filepaths to each table
         e.g {'acc': , 'runtime': ...}
@@ -108,6 +111,13 @@ def create_results_tables(
                 options.runtime_metric
             )
 
+        config_str = (
+            "\nThe results were generated using python"
+            f" {config['python_version']} and numpy "
+            f"{config['numpy_version']}."
+        )
+        description[suffix] = description[suffix] + config_str
+
     root = os.path.dirname(getfile(fitbenchmarking))
     template_dir = os.path.join(root, "templates")
 
diff --git a/fitbenchmarking/utils/checkpoint.py b/fitbenchmarking/utils/checkpoint.py
index bdfb5536a..522c843dd 100644
--- a/fitbenchmarking/utils/checkpoint.py
+++ b/fitbenchmarking/utils/checkpoint.py
@@ -7,10 +7,13 @@
 import json
 import os
 import pickle
+import sys
 from base64 import a85decode, a85encode
 from tempfile import TemporaryDirectory
 from typing import Dict
 
+import numpy as np
+
 from fitbenchmarking.utils.exceptions import CheckpointError
 from fitbenchmarking.utils.fitbm_result import FittingResult
 from fitbenchmarking.utils.options import Options
@@ -198,6 +201,14 @@ def finalise_group(
                 {
                     "failed_problems": failed_problems,
                     "unselected_minimizers": unselected_minimizers,
+                    "config": {
+                        "python_version": (
+                            f"{sys.version_info.major}."
+                            f"{sys.version_info.minor}."
+                            f"{sys.version_info.micro}"
+                        ),
+                        "numpy_version": np.__version__,
+                    },
                 },
                 indent=4,
             )[6:-1]
@@ -266,6 +277,7 @@ def load(self):
             results = group["results"]
             unselected_minimizers[label] = group["unselected_minimizers"]
             failed_problems[label] = group["failed_problems"]
+            config = group["config"]
 
             # Unpickle problems so that we use 1 shared object for all results
             # per array
@@ -323,7 +335,7 @@ def load(self):
 
                 output[label].append(new_result)
 
-        return output, unselected_minimizers, failed_problems
+        return output, unselected_minimizers, failed_problems, config
 
 
 def _compress(value):