diff --git a/src/optimagic/optimization/multistart.py b/src/optimagic/optimization/multistart.py index 9b7544430..e990dd79f 100644 --- a/src/optimagic/optimization/multistart.py +++ b/src/optimagic/optimization/multistart.py @@ -12,7 +12,7 @@ """ import warnings -from dataclasses import replace +from dataclasses import dataclass, replace from typing import Literal import numpy as np @@ -85,8 +85,8 @@ def run_multistart_optimization( scheduled_steps = scheduled_steps[1:] - sorted_sample = exploration_res["sorted_sample"] - sorted_values = exploration_res["sorted_values"] + sorted_sample = exploration_res.sorted_sample + sorted_values = exploration_res.sorted_values stopping_maxopt = options.stopping_maxopt if stopping_maxopt > len(sorted_sample): @@ -172,7 +172,7 @@ def single_optimization(x0, step_id): "start_parameters": state["start_history"], "local_optima": state["result_history"], "exploration_sample": sorted_sample, - "exploration_results": exploration_res["sorted_values"], + "exploration_results": sorted_values, } raw_res = state["best_res"] @@ -288,12 +288,27 @@ def _draw_exploration_sample( return sample_scaled +@dataclass(frozen=True) +class _InternalExplorationResult: + """Exploration result of the multistart optimization. + + Attributes: + sorted_values: List of sorted function values. + sorted_sample: 2d numpy array where each row is the internal parameter + vector corresponding to the sorted function values. + + """ + + sorted_values: list[float] + sorted_sample: NDArray[np.float64] + + def run_explorations( internal_problem: InternalOptimizationProblem, sample: NDArray[np.float64], n_cores: int, step_id: int, -) -> dict[str, NDArray[np.float64]]: +) -> _InternalExplorationResult: """Do the function evaluations for the exploration phase. Args: @@ -305,11 +320,11 @@ def run_explorations( step_id: The identifier of the exploration step. 
Returns: - dict: A dictionary with the the following entries: - "sorted_values": 1d numpy array with sorted function values. Invalid - function values are excluded. - "sorted_sample": 2d numpy array with corresponding internal parameter - vectors. + A data object containing + - sorted_values: List of sorted function values. Invalid function values are + excluded. + - sorted_sample: 2d numpy array where each row is the internal parameter + vector corresponding to the sorted function values. """ internal_problem = internal_problem.with_step_id(step_id) @@ -334,10 +349,10 @@ def run_explorations( # of the sign switch. sorting_indices = np.argsort(valid_values) - out = { - "sorted_values": valid_values[sorting_indices], - "sorted_sample": valid_sample[sorting_indices], - } + out = _InternalExplorationResult( + sorted_values=valid_values[sorting_indices].tolist(), + sorted_sample=valid_sample[sorting_indices], + ) return out diff --git a/src/optimagic/optimization/process_results.py b/src/optimagic/optimization/process_results.py index 64d764174..f9a2c191e 100644 --- a/src/optimagic/optimization/process_results.py +++ b/src/optimagic/optimization/process_results.py @@ -137,6 +137,9 @@ def _process_multistart_info( solver_type: AggregationLevel, extra_fields: ExtraResultFields, ) -> MultistartInfo: + # The `info` dictionary is obtained from the `multistart_info` field of the + # InternalOptimizeResult returned by the `run_multistart_optimization` function. 
+ starts = [converter.params_from_internal(x) for x in info["start_parameters"]] optima = [] diff --git a/src/optimagic/visualization/history_plots.py b/src/optimagic/visualization/history_plots.py index cb64a4e94..cfde3a69e 100644 --- a/src/optimagic/visualization/history_plots.py +++ b/src/optimagic/visualization/history_plots.py @@ -339,7 +339,7 @@ def _extract_plotting_data_from_results_object( if stack_multistart and local_histories is not None: stacked = _get_stacked_local_histories(local_histories, res.direction) if show_exploration: - fun = res.multistart_info.exploration_results.tolist()[::-1] + stacked.fun + fun = res.multistart_info.exploration_results[::-1] + stacked.fun params = res.multistart_info.exploration_sample[::-1] + stacked.params stacked = History( diff --git a/tests/optimagic/optimization/test_multistart.py b/tests/optimagic/optimization/test_multistart.py index 06ec00236..a6a2f90e2 100644 --- a/tests/optimagic/optimization/test_multistart.py +++ b/tests/optimagic/optimization/test_multistart.py @@ -84,8 +84,8 @@ def with_step_id(self, step_id): exp_values = np.array([-9, -1]) exp_sample = np.array([[4, 5], [0, 1]]) - aaae(calculated["sorted_sample"], exp_sample) - aaae(calculated["sorted_values"], exp_values) + aaae(calculated.sorted_sample, exp_sample) + aaae(calculated.sorted_values, exp_values) def test_get_batched_optimization_sample(): diff --git a/tests/optimagic/optimization/test_with_multistart.py b/tests/optimagic/optimization/test_with_multistart.py index cf4b24937..bc4d083b1 100644 --- a/tests/optimagic/optimization/test_with_multistart.py +++ b/tests/optimagic/optimization/test_with_multistart.py @@ -80,6 +80,7 @@ def test_multistart_optimization_with_sum_of_squares_at_defaults( assert hasattr(res, "multistart_info") ms_info = res.multistart_info assert len(ms_info.exploration_sample) == 400 + assert isinstance(ms_info.exploration_results, list) assert len(ms_info.exploration_results) == 400 assert all(isinstance(entry, float) 
for entry in ms_info.exploration_results) assert all(isinstance(entry, OptimizeResult) for entry in ms_info.local_optima)