
Commit a6d9379

Merge pull request #87 from EasyScience/develop
Develop to master
2 parents 23f4ced + 9561a52 commit a6d9379

11 files changed: +244 -40 lines


src/easyscience/__version__.py

Lines changed: 1 addition & 1 deletion

@@ -1 +1 @@
-__version__ = '1.1.2'
+__version__ = '1.2.0'

src/easyscience/fitting/fitter.py

Lines changed: 51 additions & 6 deletions

@@ -26,11 +26,13 @@ class Fitter:
     def __init__(self, fit_object, fit_function: Callable):
         self._fit_object = fit_object
         self._fit_function = fit_function
-        self._dependent_dims = None
+        self._dependent_dims: int = None
+        self._tolerance: float = None
+        self._max_evaluations: int = None
 
-        self._enum_current_minimizer = DEFAULT_MINIMIZER
-        self._minimizer: MinimizerBase  # _minimizer is set in the create method
-        self._update_minimizer(self._enum_current_minimizer)
+        self._minimizer: MinimizerBase = None  # set in _update_minimizer
+        self._enum_current_minimizer: AvailableMinimizers = None  # set in _update_minimizer
+        self._update_minimizer(DEFAULT_MINIMIZER)
 
     def fit_constraints(self) -> list:
         return self._minimizer.fit_constraints()

@@ -110,6 +112,42 @@ def minimizer(self) -> MinimizerBase:
         """
         return self._minimizer
 
+    @property
+    def tolerance(self) -> float:
+        """
+        Get the tolerance for the minimizer.
+
+        :return: Tolerance for the minimizer
+        """
+        return self._tolerance
+
+    @tolerance.setter
+    def tolerance(self, tolerance: float) -> None:
+        """
+        Set the tolerance for the minimizer.
+
+        :param tolerance: Tolerance for the minimizer
+        """
+        self._tolerance = tolerance
+
+    @property
+    def max_evaluations(self) -> int:
+        """
+        Get the maximal number of evaluations for the minimizer.
+
+        :return: Maximal number of steps for the minimizer
+        """
+        return self._max_evaluations
+
+    @max_evaluations.setter
+    def max_evaluations(self, max_evaluations: int) -> None:
+        """
+        Set the maximal number of evaluations for the minimizer.
+
+        :param max_evaluations: Maximal number of steps for the minimizer
+        """
+        self._max_evaluations = max_evaluations
+
     @property
     def fit_function(self) -> Callable:
         """

@@ -175,7 +213,7 @@ def fit(self) -> Callable:
         re-constitute the independent variables and once the fit is completed, reshape the inputs to those expected.
         """
 
-        @functools.wraps(self.minimizer.fit)
+        @functools.wraps(self._minimizer.fit)
         def inner_fit_callable(
             x: np.ndarray,
             y: np.ndarray,

@@ -202,7 +240,14 @@ def inner_fit_callable(
             constraints = self._minimizer.fit_constraints()
             self.fit_function = fit_fun_wrap
             self._minimizer.set_fit_constraint(constraints)
-            f_res = self.minimizer.fit(x_fit, y_new, weights=weights, **kwargs)
+            f_res = self._minimizer.fit(
+                x_fit,
+                y_new,
+                weights=weights,
+                tolerance=self._tolerance,
+                max_evaluations=self._max_evaluations,
+                **kwargs,
+            )
 
             # Postcompute
             fit_result = self._post_compute_reshaping(f_res, x, y)
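With these changes, tolerance and max_evaluations are plain attributes on Fitter that are forwarded to whichever minimizer is active. A minimal usage sketch, following the integration tests added further down in this commit (AbsSin is the sine test model from those tests and the Fitter import path is an assumption, not part of this diff):

import numpy as np
from easyscience.fitting import Fitter  # import path assumed

ref_sin = AbsSin(0.2, np.pi)   # reference model that generates the data (test helper, assumed in scope)
sp_sin = AbsSin(0.354, 3.05)   # starting point to be refined
sp_sin.offset.fixed = False
sp_sin.phase.fixed = False

x = np.linspace(0, 5, 200)
y = ref_sin(x)

f = Fitter(sp_sin, sp_sin)
f.tolerance = 0.01        # forwarded to the minimizer as tolerance=...
f.max_evaluations = 500   # forwarded to the minimizer as max_evaluations=...
result = f.fit(x, y)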

src/easyscience/fitting/minimizers/minimizer_base.py

Lines changed: 8 additions & 2 deletions

@@ -40,7 +40,7 @@ def __init__(
         self,
         obj,  #: BaseObj,
         fit_function: Callable,
-        minimizer_enum: Optional[AvailableMinimizers] = None,
+        minimizer_enum: AvailableMinimizers,
     ):  # todo after constraint changes, add type hint: obj: BaseObj # noqa: E501
         if minimizer_enum.method not in self.supported_methods():
             raise FitError(f'Method {minimizer_enum.method} not available in {self.__class__}')

@@ -58,6 +58,10 @@ def __init__(
     def all_constraints(self) -> List[ObjConstraint]:
         return [*self._constraints, *self._object._constraints]
 
+    @property
+    def enum(self) -> AvailableMinimizers:
+        return self._minimizer_enum
+
     @property
     def name(self) -> str:
         return self._minimizer_enum.name

@@ -83,6 +87,8 @@ def fit(
         model: Optional[Callable] = None,
         parameters: Optional[Parameter] = None,
         method: Optional[str] = None,
+        tolerance: Optional[float] = None,
+        max_evaluations: Optional[int] = None,
         **kwargs,
     ) -> FitResults:
         """

@@ -129,7 +135,7 @@ def evaluate(self, x: np.ndarray, minimizer_parameters: Optional[dict[str, float
 
         return self._fit_function(x, **minimizer_parameters, **kwargs)
 
-    def _get_method_dict(self, passed_method: Optional[str] = None) -> dict[str, str]:
+    def _get_method_kwargs(self, passed_method: Optional[str] = None) -> dict[str, str]:
         if passed_method is not None:
             if passed_method not in self.supported_methods():
                 raise FitError(f'Method {passed_method} not available in {self.__class__}')

src/easyscience/fitting/minimizers/minimizer_bumps.py

Lines changed: 9 additions & 3 deletions

@@ -74,6 +74,8 @@ def fit(
         model: Optional[Callable] = None,
         parameters: Optional[Parameter] = None,
         method: Optional[str] = None,
+        tolerance: Optional[float] = None,
+        max_evaluations: Optional[int] = None,
         minimizer_kwargs: Optional[dict] = None,
         engine_kwargs: Optional[dict] = None,
         **kwargs,

@@ -97,7 +99,7 @@ def fit(
         :return: Fit results
         :rtype: ModelResult
         """
-        method_dict = self._get_method_dict(method)
+        method_dict = self._get_method_kwargs(method)
 
         if weights is None:
             weights = np.sqrt(np.abs(y))

@@ -107,10 +109,14 @@ def fit(
 
         if minimizer_kwargs is None:
             minimizer_kwargs = {}
-        # else:
-        #     minimizer_kwargs = {"fit_kws": minimizer_kwargs}
         minimizer_kwargs.update(engine_kwargs)
 
+        if tolerance is not None:
+            minimizer_kwargs['ftol'] = tolerance  # tolerance for change in function value
+            minimizer_kwargs['xtol'] = tolerance  # tolerance for change in parameter value, could be an independent value
+        if max_evaluations is not None:
+            minimizer_kwargs['steps'] = max_evaluations
+
         if model is None:
             model_function = self._make_model(parameters=parameters)
             model = model_function(x, y, weights)
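In the Bumps backend the shared settings are simply translated into Bumps fitter keywords before the call, as the hunk above shows; ftol and xtol both receive the tolerance and steps caps the evaluation count. Illustratively (values made up, not from this diff):

# Fitter.tolerance = 1e-3 and Fitter.max_evaluations = 200 end up as
minimizer_kwargs = {'ftol': 1e-3, 'xtol': 1e-3, 'steps': 200}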

src/easyscience/fitting/minimizers/minimizer_dfo.py

Lines changed: 24 additions & 10 deletions

@@ -53,9 +53,7 @@ def supported_methods() -> List[str]:
 
     @staticmethod
     def all_methods() -> List[str]:
-        return [
-            'leastsq',
-        ]
+        return ['leastsq']
 
     def fit(
         self,

@@ -65,8 +63,8 @@ def fit(
         model: Optional[Callable] = None,
         parameters: Optional[List[Parameter]] = None,
         method: str = None,
-        xtol: float = 1e-6,
-        ftol: float = 1e-8,
+        tolerance: Optional[float] = None,
+        max_evaluations: Optional[int] = None,
         **kwargs,
     ) -> FitResults:
         """

@@ -110,6 +108,8 @@ def fit(
         stack_status = global_object.stack.enabled
         global_object.stack.enabled = False
 
+        kwargs = self._prepare_kwargs(tolerance, max_evaluations, **kwargs)
+
         try:
             model_results = self._dfo_fit(self._cached_pars, model, **kwargs)
             self._set_parameter_fit_result(model_results, stack_status)

@@ -239,7 +239,11 @@ def _gen_fit_results(self, fit_results, weights, **kwargs) -> FitResults:
         return results
 
     @staticmethod
-    def _dfo_fit(pars: Dict[str, Parameter], model: Callable, **kwargs):
+    def _dfo_fit(
+        pars: Dict[str, Parameter],
+        model: Callable,
+        **kwargs,
+    ):
         """
         Method to convert EasyScience styling to DFO-LS styling (yes, again)
 

@@ -261,13 +265,23 @@ def _dfo_fit(pars: Dict[str, Parameter], model: Callable, **kwargs):
             np.array([par.max for par in pars.values()]),
         )
         # https://numericalalgorithmsgroup.github.io/dfols/build/html/userguide.html
-        if np.isinf(bounds).any():
-            results = dfols.solve(model, pars_values, bounds=bounds, **kwargs)
-        else:
+        if not np.isinf(bounds).any():
             # It is only possible to scale (normalize) variables if they are bound (different from inf)
-            results = dfols.solve(model, pars_values, bounds=bounds, scaling_within_bounds=True, **kwargs)
+            kwargs['scaling_within_bounds'] = True
+
+        results = dfols.solve(model, pars_values, bounds=bounds, **kwargs)
 
         if 'Success' not in results.msg:
             raise FitError(f'Fit failed with message: {results.msg}')
 
         return results
+
+    @staticmethod
+    def _prepare_kwargs(tolerance: Optional[float] = None, max_evaluations: Optional[int] = None, **kwargs) -> dict[str:str]:
+        if max_evaluations is not None:
+            kwargs['maxfun'] = max_evaluations  # max number of function evaluations
+        if tolerance is not None:
+            if 0.1 < tolerance:  # dfo module throws error if larger value
+                raise ValueError('Tolerance must be equal or smaller than 0.1')
+            kwargs['rhoend'] = tolerance  # size of the trust region
+        return kwargs
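For DFO-LS the same settings become the maxfun and rhoend keywords of dfols.solve, and _prepare_kwargs rejects tolerances above 0.1 up front because DFO-LS itself errors on larger values. Illustratively (values made up, not from this diff):

# Fitter.tolerance = 0.01 and Fitter.max_evaluations = 200 end up as
kwargs = {'maxfun': 200, 'rhoend': 0.01}
# Fitter.tolerance = 0.5 raises ValueError('Tolerance must be equal or smaller than 0.1') before dfols.solve is called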

src/easyscience/fitting/minimizers/minimizer_lmfit.py

Lines changed: 24 additions & 8 deletions

@@ -85,6 +85,8 @@ def fit(
         model: Optional[LMModel] = None,
         parameters: Optional[LMParameters] = None,
         method: Optional[str] = None,
+        tolerance: Optional[float] = None,
+        max_evaluations: Optional[int] = None,
         minimizer_kwargs: Optional[dict] = None,
         engine_kwargs: Optional[dict] = None,
         **kwargs,

@@ -110,19 +112,14 @@ def fit(
         :return: Fit results
         :rtype: ModelResult
         """
-        method_dict = self._get_method_dict(method)
-
         if weights is None:
             weights = 1 / np.sqrt(np.abs(y))
 
         if engine_kwargs is None:
             engine_kwargs = {}
 
-        if minimizer_kwargs is None:
-            minimizer_kwargs = {}
-        else:
-            minimizer_kwargs = {'fit_kws': minimizer_kwargs}
-        minimizer_kwargs.update(engine_kwargs)
+        method_kwargs = self._get_method_kwargs(method)
+        fit_kws_dict = self._get_fit_kws(method, tolerance, minimizer_kwargs)
 
         # Why do we do this? Because a fitting template has to have global_object instantiated outside pre-runtime
         from easyscience import global_object

@@ -134,7 +131,16 @@ def fit(
             if model is None:
                 model = self._make_model()
 
-            model_results = model.fit(y, x=x, weights=weights, **method_dict, **minimizer_kwargs, **kwargs)
+            model_results = model.fit(
+                y,
+                x=x,
+                weights=weights,
+                max_nfev=max_evaluations,
+                fit_kws=fit_kws_dict,
+                **method_kwargs,
+                **engine_kwargs,
+                **kwargs,
+            )
             self._set_parameter_fit_result(model_results, stack_status)
             results = self._gen_fit_results(model_results)
         except Exception as e:

@@ -143,6 +149,16 @@
             raise FitError(e)
         return results
 
+    def _get_fit_kws(self, method: str, tolerance: float, minimizer_kwargs: dict[str:str]) -> dict[str:str]:
+        if minimizer_kwargs is None:
+            minimizer_kwargs = {}
+        if tolerance is not None:
+            if method in [None, 'least_squares', 'leastsq']:
+                minimizer_kwargs['ftol'] = tolerance
+            if method in ['differential_evolution', 'powell', 'cobyla']:
+                minimizer_kwargs['tol'] = tolerance
+        return minimizer_kwargs
+
     def convert_to_pars_obj(self, parameters: Optional[List[Parameter]] = None) -> LMParameters:
         """
         Create an lmfit compatible container with the `Parameters` converted from the base object.
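For lmfit the tolerance keyword depends on the requested method: the least-squares family takes it as ftol inside fit_kws, while differential_evolution, powell and cobyla take it as tol; max_evaluations is passed straight through as max_nfev. Illustratively (values made up, not from this diff):

# Fitter.tolerance = 1e-3 with the default least-squares method becomes
fit_kws = {'ftol': 1e-3}   # handed to lmfit via model.fit(..., fit_kws=fit_kws)
# with method='powell' it would instead be
fit_kws = {'tol': 1e-3}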

tests/integration_tests/Fitting/test_fitter.py

Lines changed: 56 additions & 0 deletions

@@ -140,6 +140,62 @@ def test_fit_result(fit_engine):
     check_fit_results(result, sp_sin, ref_sin, x, sp_ref1=sp_ref1, sp_ref2=sp_ref2)
 
 
+@pytest.mark.parametrize("fit_engine", [None, AvailableMinimizers.LMFit, AvailableMinimizers.Bumps, AvailableMinimizers.DFO])
+def test_basic_max_evaluations(fit_engine):
+    ref_sin = AbsSin(0.2, np.pi)
+    sp_sin = AbsSin(0.354, 3.05)
+
+    x = np.linspace(0, 5, 200)
+    y = ref_sin(x)
+
+    sp_sin.offset.fixed = False
+    sp_sin.phase.fixed = False
+
+    f = Fitter(sp_sin, sp_sin)
+    if fit_engine is not None:
+        try:
+            f.switch_minimizer(fit_engine)
+        except AttributeError:
+            pytest.skip(msg=f"{fit_engine} is not installed")
+    args = [x, y]
+    kwargs = {}
+    f.max_evaluations = 3
+    try:
+        result = f.fit(*args, **kwargs)
+        # Result should not be the same as the reference
+        assert sp_sin.phase.value != pytest.approx(ref_sin.phase.value, rel=1e-3)
+        assert sp_sin.offset.value != pytest.approx(ref_sin.offset.value, rel=1e-3)
+    except FitError as e:
+        # DFO throws a different error
+        assert "Objective has been called MAXFUN times" in str(e)
+
+
+@pytest.mark.parametrize("fit_engine,tolerance", [(None, 10), (AvailableMinimizers.LMFit, 10), (AvailableMinimizers.Bumps, 10), (AvailableMinimizers.DFO, 0.1)])
+def test_basic_tolerance(fit_engine, tolerance):
+    ref_sin = AbsSin(0.2, np.pi)
+    sp_sin = AbsSin(0.354, 3.05)
+
+    x = np.linspace(0, 5, 200)
+    y = ref_sin(x)
+
+    sp_sin.offset.fixed = False
+    sp_sin.phase.fixed = False
+
+    f = Fitter(sp_sin, sp_sin)
+    if fit_engine is not None:
+        try:
+            f.switch_minimizer(fit_engine)
+        except AttributeError:
+            pytest.skip(msg=f"{fit_engine} is not installed")
+    args = [x, y]
+    kwargs = {}
+    f.tolerance = tolerance
+    result = f.fit(*args, **kwargs)
+    # Result should not be the same as the reference
+    assert sp_sin.phase.value != pytest.approx(ref_sin.phase.value, rel=1e-3)
+    assert sp_sin.offset.value != pytest.approx(ref_sin.offset.value, rel=1e-3)
+
+
 @pytest.mark.parametrize("fit_method", ["leastsq", "powell", "cobyla"])
 def test_lmfit_methods(fit_method):
     ref_sin = AbsSin(0.2, np.pi)
