Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
39 changes: 35 additions & 4 deletions src/easyscience/fitting/minimizers/minimizer_bumps.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@
# SPDX-License-Identifier: BSD-3-Clause

import copy
import functools
import inspect
from typing import Callable
from typing import List
from typing import Optional
Expand All @@ -28,6 +30,19 @@
FIT_AVAILABLE_IDS_FILTERED.remove('pt')


class _EvalCounter:
def __init__(self, fn: Callable):
self._fn = fn
self.count = 0
self.__name__ = getattr(fn, '__name__', self.__class__.__name__)
self.__signature__ = inspect.signature(fn)
functools.update_wrapper(self, fn)

def __call__(self, *args, **kwargs):
self.count += 1
return self._fn(*args, **kwargs)


class Bumps(MinimizerBase):
"""
This is a wrapper to Bumps: https://bumps.readthedocs.io/
Expand All @@ -54,6 +69,7 @@ def __init__(
"""
super().__init__(obj=obj, fit_function=fit_function, minimizer_enum=minimizer_enum)
self._p_0 = {}
self._eval_counter: Optional[_EvalCounter] = None

@staticmethod
def all_methods() -> List[str]:
Expand Down Expand Up @@ -148,7 +164,7 @@ def fit(
try:
model_results = bumps_fit(problem, **method_dict, **minimizer_kwargs, **kwargs)
self._set_parameter_fit_result(model_results, stack_status, problem._parameters)
results = self._gen_fit_results(model_results)
results = self._gen_fit_results(model_results, max_evaluations=max_evaluations)
except Exception as e:
for key in self._cached_pars.keys():
self._cached_pars[key].value = self._cached_pars_vals[key][0]
Expand Down Expand Up @@ -200,7 +216,8 @@ def _make_model(self, parameters: Optional[List[BumpsParameter]] = None) -> Call
:return: Callable to make a bumps Curve model
:rtype: Callable
"""
fit_func = self._generate_fit_function()
fit_func = _EvalCounter(self._generate_fit_function())
self._eval_counter = fit_func

def _outer(obj):
def _make_func(x, y, weights):
Expand Down Expand Up @@ -249,7 +266,12 @@ def _set_parameter_fit_result(
if stack_status:
global_object.stack.endMacro()

def _gen_fit_results(self, fit_results, **kwargs) -> FitResults:
def _gen_fit_results(
self,
fit_results,
max_evaluations: Optional[int] = None,
**kwargs,
) -> FitResults:
"""Convert fit results into the unified `FitResults` format.

:param fit_result: Fit object which contains info on the fit
Expand All @@ -261,7 +283,10 @@ def _gen_fit_results(self, fit_results, **kwargs) -> FitResults:
for name, value in kwargs.items():
if getattr(results, name, False):
setattr(results, name, value)
results.success = fit_results.success
nit = getattr(fit_results, 'nit', 0)
stopped_on_budget = max_evaluations is not None and nit >= max_evaluations - 1

results.success = fit_results.success and not stopped_on_budget
pars = self._cached_pars
item = {}
for index, name in enumerate(self._cached_model.pars.keys()):
Expand All @@ -275,6 +300,12 @@ def _gen_fit_results(self, fit_results, **kwargs) -> FitResults:
results.y_obs = self._cached_model.y
results.y_calc = self.evaluate(results.x, minimizer_parameters=results.p)
results.y_err = self._cached_model.dy
results.n_evaluations = None if self._eval_counter is None else self._eval_counter.count
results.message = (
f'Fit stopped: reached maximum evaluations ({max_evaluations})'
if stopped_on_budget
else ''
)
# results.residual = results.y_obs - results.y_calc
# results.goodness_of_fit = np.sum(results.residual**2)
results.minimizer_engine = self.__class__
Expand Down
15 changes: 11 additions & 4 deletions src/easyscience/fitting/minimizers/minimizer_dfo.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,6 +122,10 @@
model_results = self._dfo_fit(self._cached_pars, model, **kwargs)
self._set_parameter_fit_result(model_results, stack_status)
results = self._gen_fit_results(model_results, weights)
except FitError:
for key in self._cached_pars.keys():
self._cached_pars[key].value = self._cached_pars_vals[key][0]
raise

Check warning on line 128 in src/easyscience/fitting/minimizers/minimizer_dfo.py

View check run for this annotation

Codecov / codecov/patch

src/easyscience/fitting/minimizers/minimizer_dfo.py#L126-L128

Added lines #L126 - L128 were not covered by tests
except Exception as e:
for key in self._cached_pars.keys():
self._cached_pars[key].value = self._cached_pars_vals[key][0]
Expand Down Expand Up @@ -208,7 +212,7 @@
for name, value in kwargs.items():
if getattr(results, name, False):
setattr(results, name, value)
results.success = not bool(fit_results.flag)
results.success = fit_results.flag == fit_results.EXIT_SUCCESS

pars = {}
for p_name, par in self._cached_pars.items():
Expand All @@ -220,11 +224,14 @@
results.y_obs = self._cached_model.y
results.y_calc = self.evaluate(results.x, minimizer_parameters=results.p)
results.y_err = weights
results.n_evaluations = int(fit_results.nf)
results.message = str(fit_results.msg)
# results.residual = results.y_obs - results.y_calc
# results.goodness_of_fit = fit_results.f

results.minimizer_engine = self.__class__
results.fit_args = None
results.engine_result = fit_results
# results.check_sanity()

return results
Expand Down Expand Up @@ -258,10 +265,10 @@

results = dfols.solve(model, pars_values, bounds=bounds, **kwargs)

if 'Success' not in results.msg:
raise FitError(f'Fit failed with message: {results.msg}')
if results.flag in {results.EXIT_SUCCESS, results.EXIT_MAXFUN_WARNING}:
return results

return results
raise FitError(f'Fit failed with message: {results.msg}')

@staticmethod
def _prepare_kwargs(
Expand Down
2 changes: 2 additions & 0 deletions src/easyscience/fitting/minimizers/minimizer_lmfit.py
Original file line number Diff line number Diff line change
Expand Up @@ -298,6 +298,8 @@ def _gen_fit_results(self, fit_results: ModelResult, **kwargs) -> FitResults:
# results.goodness_of_fit = fit_results.chisqr
results.y_calc = fit_results.best_fit
results.y_err = 1 / fit_results.weights
results.n_evaluations = fit_results.nfev
results.message = fit_results.message
results.minimizer_engine = self.__class__
results.fit_args = None

Expand Down
4 changes: 4 additions & 0 deletions src/easyscience/fitting/minimizers/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,8 @@ class FitResults:
'y_obs',
'y_calc',
'y_err',
'n_evaluations',
'message',
'engine_result',
'total_results',
]
Expand All @@ -35,6 +37,8 @@ def __init__(self):
self.y_obs = np.ndarray([])
self.y_calc = np.ndarray([])
self.y_err = np.ndarray([])
self.n_evaluations = None
self.message = ''
self.engine_result = None
self.total_results = None

Expand Down
2 changes: 2 additions & 0 deletions src/easyscience/fitting/multi_fitter.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,8 @@ def _post_compute_reshaping(
current_results.minimizer_engine = fit_result_obj.minimizer_engine
current_results.p = fit_result_obj.p
current_results.p0 = fit_result_obj.p0
current_results.n_evaluations = fit_result_obj.n_evaluations
current_results.message = fit_result_obj.message
current_results.x = this_x
current_results.y_obs = y[idx]
current_results.y_calc = np.reshape(
Expand Down
50 changes: 42 additions & 8 deletions tests/integration/fitting/test_fitter.py
Original file line number Diff line number Diff line change
Expand Up @@ -207,14 +207,48 @@ def test_basic_max_evaluations(fit_engine):
except AttributeError:
pytest.skip(msg=f'{fit_engine} is not installed')
f.max_evaluations = 3
try:
result = f.fit(x=x, y=y, weights=weights)
# Result should not be the same as the reference
assert sp_sin.phase.value != pytest.approx(ref_sin.phase.value, rel=1e-3)
assert sp_sin.offset.value != pytest.approx(ref_sin.offset.value, rel=1e-3)
except FitError as e:
# DFO throws a different error
assert 'Objective has been called MAXFUN times' in str(e)
result = f.fit(x=x, y=y, weights=weights)
# Result should not be the same as the reference
assert sp_sin.phase.value != pytest.approx(ref_sin.phase.value, rel=1e-3)
assert sp_sin.offset.value != pytest.approx(ref_sin.offset.value, rel=1e-3)


@pytest.mark.fast
@pytest.mark.parametrize(
    'fit_engine',
    [
        None,
        AvailableMinimizers.LMFit,
        AvailableMinimizers.Bumps,
        AvailableMinimizers.DFO,
    ],
)
def test_max_evaluations_populates_fit_result_fields(fit_engine):
    """With a tight budget every engine must return success=False, n_evaluations>0, non-empty message."""
    # Reference model provides the synthetic "observed" data; sp_sin starts
    # from deliberately wrong parameters so the fit has work to do.
    ref_sin = AbsSin(0.2, np.pi)
    sp_sin = AbsSin(0.354, 3.05)

    x = np.linspace(0, 5, 200)
    weights = np.ones_like(x)
    y = ref_sin(x)

    # Free the parameters the minimizer is allowed to vary.
    sp_sin.offset.fixed = False
    sp_sin.phase.fixed = False

    f = Fitter(sp_sin, sp_sin)
    if fit_engine is not None:
        try:
            f.switch_minimizer(fit_engine)
        except AttributeError:
            # Optional engine not installed in this environment.
            pytest.skip(msg=f'{fit_engine} is not installed')
    # A budget of 3 evaluations is far too small to converge, so every
    # engine must stop early and report that via the result fields.
    f.max_evaluations = 3
    result = f.fit(x=x, y=y, weights=weights)

    assert result.success is False
    assert result.n_evaluations is not None
    assert result.n_evaluations > 0
    assert isinstance(result.message, str)
    assert len(result.message) > 0


@pytest.mark.fast
Expand Down
40 changes: 40 additions & 0 deletions tests/integration/fitting/test_multi_fitter.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,46 @@ def test_multi_fit(fit_engine):
assert result.residual == pytest.approx(F_real[idx](X[idx]) - F_ref[idx](X[idx]), abs=1e-2)


@pytest.mark.parametrize('fit_engine', [None, 'LMFit', 'Bumps', 'DFO'])
def test_multi_fit_propagates_n_evaluations_and_message(fit_engine):
    """Verify that n_evaluations and message are copied into each per-dataset result."""
    # Ground-truth models generate the data; the "fitted" pair starts from
    # deliberately wrong parameter values.
    reference_1 = AbsSin(0.2, np.pi)
    fitted_1 = AbsSin(0.354, 3.05)
    reference_2 = AbsSin(np.pi * 0.45, 0.45 * np.pi * 0.5)
    fitted_2 = AbsSin(1, 0.5)

    # Couple the second model's offset to the first one's through a
    # dependency expression, mirroring a shared-parameter multi-fit setup.
    reference_2.offset.make_dependent_on(
        dependency_expression='ref_sin1', dependency_map={'ref_sin1': reference_1.offset}
    )
    fitted_2.offset.make_dependent_on(
        dependency_expression='sp_sin1', dependency_map={'sp_sin1': fitted_1.offset}
    )

    x_first = np.linspace(0, 5, 200)
    y_first = reference_1(x_first)
    x_second = np.copy(x_first)
    y_second = reference_2(x_second)
    weights = np.ones_like(x_first)

    # Free only the parameters the minimizer may vary.
    fitted_1.offset.fixed = False
    fitted_1.phase.fixed = False
    fitted_2.phase.fixed = False

    fitter = MultiFitter([fitted_1, fitted_2], [fitted_1, fitted_2])
    if fit_engine is not None:
        try:
            fitter.switch_minimizer(fit_engine)
        except AttributeError:
            pytest.skip(msg=f'{fit_engine} is not installed')

    all_results = fitter.fit(x=[x_first, x_second], y=[y_first, y_second], weights=[weights, weights])
    for res in all_results:
        # Each per-dataset result must carry the evaluation count and message.
        assert res.n_evaluations is not None
        assert isinstance(res.n_evaluations, int)
        assert res.n_evaluations > 0
        assert isinstance(res.message, str)

@pytest.mark.parametrize('fit_engine', [None, 'LMFit', 'Bumps', 'DFO'])
def test_multi_fit2(fit_engine):
ref_sin_1 = AbsSin(0.2, np.pi)
Expand Down
18 changes: 14 additions & 4 deletions tests/unit/fitting/minimizers/test_minimizer_bumps.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ def fake_set_parameter_fit_result(fit_result, stack_status, par_list):
assert result == 'gen_fit_results'
mock_bumps_fit.assert_called_once_with(mock_FitProblem_instance, method='amoeba')
minimizer._make_model.assert_called_once_with(parameters=None)
minimizer._gen_fit_results.assert_called_once_with('fit')
minimizer._gen_fit_results.assert_called_once_with('fit', max_evaluations=None)
mock_model_function.assert_called_once_with(1.0, 2.0, 1)
mock_FitProblem.assert_called_once_with(mock_model)

Expand Down Expand Up @@ -127,10 +127,13 @@ def test_make_model(self, minimizer: Bumps, monkeypatch) -> None:
curve_for_model = model(
x=np.array([1, 2]), y=np.array([10, 20]), weights=np.array([100, 200])
)
wrapped_fit_function = mock_Curve.call_args[0][0]
wrapped_fit_function(np.array([1, 2]), pmock_parm_1=3)

# Expect
minimizer._generate_fit_function.assert_called_once_with()
assert mock_Curve.call_args[0][0] == mock_fit_function
assert minimizer._eval_counter is wrapped_fit_function
assert minimizer._eval_counter.count == 1
assert all(mock_Curve.call_args[0][1] == np.array([1, 2]))
assert all(mock_Curve.call_args[0][2] == np.array([10, 20]))
assert curve_for_model == 'curve'
Expand Down Expand Up @@ -178,6 +181,7 @@ def test_gen_fit_results(self, minimizer: Bumps, monkeypatch):

mock_fit_result = MagicMock()
mock_fit_result.success = True
mock_fit_result.nit = 2 # nit >= max_evaluations - 1 → budget exhausted

mock_cached_model = MagicMock()
mock_cached_model.x = 'x'
Expand All @@ -193,28 +197,34 @@ def test_gen_fit_results(self, minimizer: Bumps, monkeypatch):
minimizer._cached_pars = {'par_1': mock_cached_par_1, 'par_2': mock_cached_par_2}

minimizer._p_0 = 'p_0'
minimizer._eval_counter = MagicMock(count=7)
minimizer.evaluate = MagicMock(return_value='evaluate')

# Then
domain_fit_results = minimizer._gen_fit_results(
mock_fit_result, **{'kwargs_set_key': 'kwargs_set_val'}
mock_fit_result,
max_evaluations=3,
**{'kwargs_set_key': 'kwargs_set_val'},
)

# Expect
assert domain_fit_results == mock_domain_fit_results
assert domain_fit_results.kwargs_set_key == 'kwargs_set_val'
assert domain_fit_results.success == True
assert domain_fit_results.success == False
assert domain_fit_results.y_obs == 'y'
assert domain_fit_results.x == 'x'
assert domain_fit_results.p == {'ppar_1': 'par_value_1', 'ppar_2': 'par_value_2'}
assert domain_fit_results.p0 == 'p_0'
assert domain_fit_results.y_calc == 'evaluate'
assert domain_fit_results.y_err == 'dy'
assert domain_fit_results.n_evaluations == 7
assert domain_fit_results.message == 'Fit stopped: reached maximum evaluations (3)'
assert (
str(domain_fit_results.minimizer_engine)
== "<class 'easyscience.fitting.minimizers.minimizer_bumps.Bumps'>"
)
assert domain_fit_results.fit_args is None
assert domain_fit_results.engine_result == mock_fit_result
minimizer.evaluate.assert_called_once_with(
'x', minimizer_parameters={'ppar_1': 'par_value_1', 'ppar_2': 'par_value_2'}
)
Loading
Loading