Skip to content

Commit

Permalink
Added number of iterations/function evaluations to fitting report (fi…
Browse files Browse the repository at this point in the history
…tbenchmarking#1307)

* add number of iterations/function evaluations to fitting report

* fix failing test

* add iteration counts for matlab minimizers

* record iter count and func evals separately for minimizers that output both

* fix typo

Co-authored-by: RabiyaF <[email protected]>

* fix typo

Co-authored-by: RabiyaF <[email protected]>

* use str() instead of f-string

Co-authored-by: RabiyaF <[email protected]>

* address review comments

* ruff formatting fixes

* fix failing test

* fix failing test

---------

Co-authored-by: RabiyaF <[email protected]>
  • Loading branch information
jess-farmer and RabiyaF authored Nov 5, 2024
1 parent aa15cf7 commit d169dc8
Show file tree
Hide file tree
Showing 26 changed files with 747 additions and 496 deletions.
2 changes: 1 addition & 1 deletion docs/source/users/install_instructions/fitbenchmarking.rst
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ where valid strings ``option-x`` are:
* ``levmar`` -- installs the `levmar <http://users.ics.forth.gr/~lourakis/levmar/>`_ fitting package (suitable for Python up to 3.8, see :ref:`levmar-install`). Note that the interface we use also requires BLAS and LAPACK to be installed on the system, and calls to this minimizer will fail if these libraries are not present.
* ``mantid`` -- installs the `h5py <https://pypi.org/project/h5py/>`_ and `pyyaml <https://pypi.org/project/PyYAML/>`_ modules.
* ``matlab`` -- installs the `dill <https://pypi.org/project/dill/>`_ module required to run matlab controllers in fitbenchmarking
* ``minuit`` -- installs the `Minuit <http://seal.web.cern.ch/seal/snapshot/work-packages/mathlibs/minuit/>`_ fitting package.
* ``minuit`` -- installs the `Minuit <https://scikit-hep.org/iminuit/>`_ fitting package.
* ``SAS`` -- installs the `Sasmodels <https://github.com/SasView/sasmodels>`_ fitting package and the `tinycc <https://pypi.org/project/tinycc/>`_ module.
* ``numdifftools`` -- installs the `numdifftools <https://numdifftools.readthedocs.io/en/latest/index.html>`_ numerical differentiation package.
* ``nlopt`` -- installs the `NLopt <https://github.com/DanielBok/nlopt-python#installation>`_ fitting package.
Expand Down
13 changes: 12 additions & 1 deletion fitbenchmarking/controllers/base_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -172,6 +172,12 @@ def __init__(self, cost_func):

self.par_names = self.problem.param_names

# save iteration count
self.iteration_count = None

# save number of function evaluations
self.func_evals = None

@property
def flag(self):
"""
Expand Down Expand Up @@ -465,7 +471,12 @@ def check_attributes(self):
A helper function which checks all required attributes are set
in software controllers
"""
values = {"_flag": int, "final_params": np.ndarray}
values = {
"_flag": int,
"final_params": np.ndarray,
"iteration_count": (int, type(None)),
"func_evals": (int, type(None)),
}

for attr_name, attr_type in values.items():
attr = getattr(self, attr_name)
Expand Down
5 changes: 5 additions & 0 deletions fitbenchmarking/controllers/ceres_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -204,3 +204,8 @@ def cleanup(self):
self.flag = 2

self.final_params = self.result

self.iteration_count = (
self.ceres_summary.num_successful_steps
+ self.ceres_summary.num_unsuccessful_steps
)
1 change: 1 addition & 0 deletions fitbenchmarking/controllers/dfo_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,3 +104,4 @@ def cleanup(self):
self.flag = 2

self.final_params = self._popt
self.func_evals = self._soln.nf
3 changes: 2 additions & 1 deletion fitbenchmarking/controllers/gradient_free_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,7 @@ def setup(self):
for i in range(len(self.initial_params))
}

self.iteration_count = 1000
self.initialize = {"warm_start": param_dict}

def _feval(self, p):
Expand Down Expand Up @@ -151,7 +152,7 @@ def fit(self):
method_to_call = getattr(gfo, self.minimizer)

opt = method_to_call(self.search_space)
opt.search(self._feval, n_iter=1000, verbosity=False)
opt.search(self._feval, n_iter=self.iteration_count, verbosity=False)
self.results = opt.best_para
self._status = 0 if self.results is not None else 1

Expand Down
7 changes: 6 additions & 1 deletion fitbenchmarking/controllers/gsl_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,7 @@ def __init__(self, cost_func):
self._abserror = None
self._relerror = None
self._maxits = None
self._nits = None

def _prediction_error(self, p, data=None):
"""
Expand Down Expand Up @@ -225,7 +226,7 @@ def fit(self):
"""
Run problem with GSL
"""
for _ in range(self._maxits):
for n in range(self._maxits):
status = self._solver.iterate()
# check if the method has converged
if self.minimizer in self._residual_methods:
Expand All @@ -244,6 +245,7 @@ def fit(self):
)
if status == errno.GSL_SUCCESS:
self.flag = 0
self._nits = n + 1
break
if status != errno.GSL_CONTINUE:
self.flag = 2
Expand All @@ -256,3 +258,6 @@ def cleanup(self):
will be read from
"""
self.final_params = self._solver.getx()
self.iteration_count = (
self._maxits if self._nits is None else self._nits
)
3 changes: 3 additions & 0 deletions fitbenchmarking/controllers/levmar_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,3 +122,6 @@ def cleanup(self):
self.flag = 1
else:
self.flag = 2

self.iteration_count = self._info[2]
self.func_evals = self._info[4]
2 changes: 2 additions & 0 deletions fitbenchmarking/controllers/lmfit_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -185,6 +185,8 @@ def cleanup(self):
will be read from
"""

self.func_evals = self.lmfit_out.nfev

if self.lmfit_out.success:
self.flag = 0
else:
Expand Down
8 changes: 5 additions & 3 deletions fitbenchmarking/controllers/matlab_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ def __init__(self, cost_func):
super().__init__(cost_func)
self._status = None
self.result = None
self._nits = None

def setup(self):
"""
Expand All @@ -59,12 +60,13 @@ def fit(self):
"""
Run problem with Matlab
"""
[self.result, _, exitflag] = self.eng.fminsearch(
[self.result, _, exitflag, output] = self.eng.fminsearch(
self.eng.workspace["eval_cost_mat"],
self.initial_params_mat,
nargout=3,
nargout=4,
)
self._status = int(exitflag)
self._nits = int(output["iterations"])

def cleanup(self):
"""
Expand All @@ -77,7 +79,7 @@ def cleanup(self):
self.flag = 1
else:
self.flag = 2

self.final_params = np.array(
self.result[0], dtype=np.float64
).flatten()
self.iteration_count = self._nits
1 change: 1 addition & 0 deletions fitbenchmarking/controllers/matlab_curve_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,3 +120,4 @@ def cleanup(self):
self.final_params = self.eng.coeffvalues(self.eng.workspace["fitobj"])[
0
]
self.iteration_count = int(self.eng.workspace["output"]["iterations"])
5 changes: 4 additions & 1 deletion fitbenchmarking/controllers/matlab_opt_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ def __init__(self, cost_func):
self.y_data_mat = None
self._status = None
self.result = None
self._nits = None

def setup(self):
"""
Expand Down Expand Up @@ -111,7 +112,7 @@ def fit(self):
"""
Run problem with Matlab Optimization Toolbox
"""
self.result, _, _, exitflag, _ = self.eng.lsqcurvefit(
self.result, _, _, exitflag, output = self.eng.lsqcurvefit(
self.eng.workspace["eval_func"],
self.initial_params_mat,
self.x_data_mat,
Expand All @@ -122,6 +123,7 @@ def fit(self):
nargout=5,
)
self._status = int(exitflag)
self._nits = output["iterations"]

def cleanup(self):
"""
Expand All @@ -139,3 +141,4 @@ def cleanup(self):
self.final_params = np.array(
self.result[0], dtype=np.float64
).flatten()
self.iteration_count = self._nits
1 change: 1 addition & 0 deletions fitbenchmarking/controllers/minuit_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,5 +122,6 @@ def cleanup(self):
else:
self.flag = 2

self.func_evals = self._minuit_problem.nfcn
self._popt = np.array(self._minuit_problem.values)
self.final_params = self._popt
11 changes: 7 additions & 4 deletions fitbenchmarking/controllers/ralfit_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,7 @@ def __init__(self, cost_func):
self.param_ranges = None
self._status = None
self._popt = None
self._iter = None
self._options = {}

def setup(self):
Expand Down Expand Up @@ -161,25 +162,26 @@ def fit(self):
Run problem with RALFit.
"""
if self.cost_func.hessian:
self._popt = ral_nlls.solve(
(self._popt, inform) = ral_nlls.solve(
self.initial_params,
self.cost_func.eval_r,
self.cost_func.jac_res,
self.hes_eval,
options=self._options,
lower_bounds=self.param_ranges[0],
upper_bounds=self.param_ranges[1],
)[0]
)
else:
self._popt = ral_nlls.solve(
(self._popt, inform) = ral_nlls.solve(
self.initial_params,
self.cost_func.eval_r,
self.cost_func.jac_res,
options=self._options,
lower_bounds=self.param_ranges[0],
upper_bounds=self.param_ranges[1],
)[0]
)
self._status = 0 if self._popt is not None else 1
self._iter = inform["iter"]

def cleanup(self):
"""
Expand All @@ -192,3 +194,4 @@ def cleanup(self):
self.flag = 2

self.final_params = self._popt
self.iteration_count = self._iter
3 changes: 3 additions & 0 deletions fitbenchmarking/controllers/scipy_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -153,4 +153,7 @@ def cleanup(self):
else:
self.flag = 2

self.func_evals = self.result.nfev
self.iteration_count = self.result.nit

self.final_params = self._popt
20 changes: 7 additions & 13 deletions fitbenchmarking/controllers/scipy_go_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,8 +52,7 @@ def __init__(self, cost_func):
super().__init__(cost_func)

self.support_for_bounds = True
self._popt = None
self._status = None
self._result = None
self._maxiter = None

def setup(self):
Expand Down Expand Up @@ -90,25 +89,20 @@ def fit(self):
fun = self.cost_func.eval_cost
bounds = self.value_ranges
algorithm = getattr(optimize, self.minimizer)
result = algorithm(fun, bounds, **kwargs)
self._popt = result.x
if result.success:
self._status = 0
elif "Maximum number of iteration" in result.message:
self._status = 1
else:
self._status = 2
self._result = algorithm(fun, bounds, **kwargs)

def cleanup(self):
"""
Convert the result to a numpy array and populate the variables results
will be read from.
"""
if self._status == 0:
if self._result.success:
self.flag = 0
elif self._status == 1:
elif "Maximum number of iteration reached" in self._result.message:
self.flag = 1
else:
self.flag = 2

self.final_params = self._popt
self.final_params = self._result.x
self.iteration_count = self._result.nit
self.func_evals = self._result.nfev
2 changes: 2 additions & 0 deletions fitbenchmarking/controllers/scipy_leastsq_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,4 +80,6 @@ def cleanup(self):
else:
self.flag = 2

self.func_evals = self.result[2]["nfev"]

self.final_params = self._popt
1 change: 1 addition & 0 deletions fitbenchmarking/controllers/scipy_ls_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,4 +110,5 @@ def cleanup(self):
else:
self.flag = 2

self.func_evals = self.result.nfev
self.final_params = self._popt
50 changes: 47 additions & 3 deletions fitbenchmarking/controllers/tests/test_controllers.py
Original file line number Diff line number Diff line change
Expand Up @@ -284,6 +284,49 @@ def test_check_invalid_final_params(self):
with self.assertRaises(exceptions.ControllerAttributeError):
controller.check_attributes()

def test_check_valid_iteration_count(self):
    """
    BaseSoftwareController: check_attributes accepts an integer
    iteration_count
    """
    ctrl = DummyController(self.cost_func)
    ctrl.final_params = [1, 2, 3, 4, 5]
    ctrl.flag = 3
    ctrl.iteration_count = 10
    ctrl.check_attributes()

def test_check_invalid_iteration_count(self):
    """
    BaseSoftwareController: check_attributes rejects a non-integer
    iteration_count
    """
    ctrl = DummyController(self.cost_func)
    ctrl.final_params = [1, 2, 3, 4, 5]
    ctrl.flag = 3
    ctrl.iteration_count = 10.5
    with self.assertRaises(exceptions.ControllerAttributeError):
        ctrl.check_attributes()

def test_check_valid_func_evals(self):
    """
    BaseSoftwareController: check_attributes accepts an integer
    func_evals
    """
    ctrl = DummyController(self.cost_func)
    ctrl.final_params = [1, 2, 3, 4, 5]
    ctrl.flag = 3
    ctrl.iteration_count = 10
    ctrl.func_evals = 10
    ctrl.check_attributes()

def test_check_invalid_func_evals(self):
    """
    BaseSoftwareController: check_attributes rejects a non-integer
    func_evals
    """
    ctrl = DummyController(self.cost_func)
    ctrl.final_params = [1, 2, 3, 4, 5]
    ctrl.flag = 3
    ctrl.func_evals = 10.5
    with self.assertRaises(exceptions.ControllerAttributeError):
        ctrl.check_attributes()

def test_validate_minimizer_true(self):
"""
BaseSoftwareController: Test validate_minimizer with valid
Expand Down Expand Up @@ -1125,11 +1168,12 @@ def test_scipy_go(self):

self.shared_tests.controller_run_test(controller)

controller._status = 0
self.shared_tests.check_converged(controller)
controller._status = 1
controller._result.success = False
self.shared_tests.check_max_iterations(controller)
controller._status = 2
controller._result.message = [
"Maximum number of iteration NOT reached"
]
self.shared_tests.check_diverged(controller)

def test_gradient_free(self):
Expand Down
2 changes: 2 additions & 0 deletions fitbenchmarking/core/tests/test_fitting_benchmarking.py
Original file line number Diff line number Diff line change
Expand Up @@ -910,6 +910,8 @@ def test_benchmark_method(self):
"jacobian_tag",
"hessian_tag",
"costfun_tag",
"iteration_count",
"func_evals",
]:
assert getattr(r, attr) == expected["results"][ix][attr]
self.assertAlmostEqual(
Expand Down
12 changes: 12 additions & 0 deletions fitbenchmarking/results_processing/fitting_report.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,16 @@ def create_prob_group(result, support_pages_dir, options):
n_params = result.get_n_parameters()
list_params = n_params < 100

iteration_count = (
str(result.iteration_count)
if result.iteration_count
else "not available"
)

func_evals = (
str(result.func_evals) if result.func_evals else "not available"
)

if np.isnan(result.emissions):
emission_disp = "N/A"
else:
Expand Down Expand Up @@ -115,6 +125,8 @@ def create_prob_group(result, support_pages_dir, options):
n_params=n_params,
list_params=list_params,
n_data_points=result.get_n_data_points(),
iteration_count=iteration_count,
func_evals=func_evals,
)
)

Expand Down
Loading

0 comments on commit d169dc8

Please sign in to comment.