# bluemira is an integrated inter-disciplinary design tool for future fusion
# reactors. It incorporates several modules, some of which rely on other
# codes, to carry out a range of typical conceptual fusion reactor design
# activities.
#
# Copyright (C) 2021 M. Coleman, J. Cook, F. Franza, I.A. Maione, S. McIntosh, J. Morris,
# D. Short
#
# bluemira is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# bluemira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with bluemira; if not, see <https://www.gnu.org/licenses/>.
"""
A quick tutorial on the optimisation of geometry in bluemira
"""

# We're going to set up some geometry optimisation problems and solve them with
# different optimisation algorithms.
import logging

import numpy as np
from simvue import Handler, Run

from bluemira.geometry.optimisation import GeometryOptimisationProblem, minimise_length
from bluemira.geometry.parameterisations import PrincetonD
from bluemira.utilities.opt_problems import OptimisationConstraint, OptimisationObjective
from bluemira.utilities.optimiser import Optimiser, approx_derivative
from bluemira.utilities.tools import set_random_seed

# Let's set up a simple GeometryOptimisationProblem, where we minimise the length
# of a parameterised geometry.

# First, we set up the GeometryParameterisation, with some bounds on the variables.
x1_lower = 2
x1_value = 2.05
x1_upper = 6
x2_lower = 80
x2_value = 198.5
x2_upper = 260
dz_lower = -0.5
dz_upper = 0.5
max_eval = 500
ftol_abs = 1e-12
ftol_rel = 1e-12

run = Run()

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
sth = Handler(run)
logger.addHandler(sth)

run.init(
    metadata={
        "dataset.x1_lower": x1_lower,
        "dataset.x1_upper": x1_upper,
        "dataset.x1_value": x1_value,
        "dataset.x2_lower": x2_lower,
        "dataset.x2_upper": x2_upper,
        "dataset.x2_value": x2_value,
        "dataset.dz_lower": dz_lower,
        "dataset.dz_upper": dz_upper,
        "optimiser.max_eval": max_eval,
        "optimiser.ftol_abs": ftol_abs,
        "optimiser.ftol_rel": ftol_rel,
    },
    description=(
        "A simple GeometryOptimisationProblem, where we minimise the length of a "
        "parameterised geometry using a gradient-based optimisation algorithm."
    ),
)

logger.info("Initialised run")
logger.info("Create parameterisation")
parameterisation_1 = PrincetonD(
    {
        "x1": {"lower_bound": x1_lower, "value": x1_value, "upper_bound": x1_upper},
        "x2": {"lower_bound": x2_lower, "value": x2_value, "upper_bound": x2_upper},
        "dz": {"lower_bound": dz_lower, "value": 0, "upper_bound": dz_upper},
    }
)

# Here we're minimising the length, and we can work out that the dz variable will
# not affect the optimisation, so let's just fix it at some value and remove it
# from the problem.
parameterisation_1.fix_variable("dz", value=0)

# Now, we set up our optimiser. We'll start with a gradient-based optimisation
# algorithm.
logger.info("Define Optimiser")

slsqp_optimiser = Optimiser(
    "SLSQP",
    opt_conditions={"max_eval": max_eval, "ftol_abs": ftol_abs, "ftol_rel": ftol_rel},
)
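
# The intro mentions trying different optimisation algorithms. A derivative-free
# method can be swapped in without changing anything else. A minimal sketch,
# assuming the NLopt-backed `Optimiser` also accepts the "COBYLA" algorithm name
# with the same opt_conditions keys (this optimiser is illustrative and not used
# further in this tutorial):
cobyla_optimiser = Optimiser(
    "COBYLA",
    opt_conditions={"max_eval": max_eval, "ftol_abs": ftol_abs, "ftol_rel": ftol_rel},
)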


# Define the callback function
def calculate_length(vector, parameterisation):
    """
    Calculate the length of the parameterised shape for a given state vector.
    """
    parameterisation.variables.set_values_from_norm(vector)
    print("logging metrics", float(parameterisation.variables["x1"].value))
    run.log_metrics(
        {
            "x1_value": float(parameterisation.variables["x1"].value),
            "x1_lower": float(parameterisation.variables["x1"].lower_bound),
            "x1_upper": float(parameterisation.variables["x1"].upper_bound),
        }
    )
    run.log_metrics(
        {
            "x2_value": float(parameterisation.variables["x2"].value),
            "x2_lower": float(parameterisation.variables["x2"].lower_bound),
            "x2_upper": float(parameterisation.variables["x2"].upper_bound),
        }
    )
    return parameterisation.create_shape().length


def my_minimise_length(vector, grad, parameterisation, ad_args=None):
    """
    Objective function for nlopt optimisation (minimisation) of length.

    Parameters
    ----------
    vector: np.ndarray
        State vector of the normalised geometry parameterisation variables.
    grad: np.ndarray
        Local gradient of objective function used by LD NLOPT algorithms.
        Updated in-place.
    parameterisation: GeometryParameterisation
        Geometry parameterisation whose shape length is being minimised.
    ad_args: Dict
        Additional arguments to pass to the `approx_derivative` function.

    Returns
    -------
    fom: Value of objective function (figure of merit).
    """
    ad_args = ad_args if ad_args is not None else {}
    print(vector)
    length = calculate_length(vector, parameterisation)
    if grad.size > 0:
        grad[:] = approx_derivative(
            calculate_length, vector, f0=length, args=(parameterisation,), **ad_args
        )
    run.update_metadata(
        {
            "x1_value": float(parameterisation.variables["x1"].value),
            "x2_value": float(parameterisation.variables["x2"].value),
        }
    )
    return length
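
# Side note: `approx_derivative` is what fills `grad` above with a finite-difference
# estimate. A minimal sketch of that behaviour, assuming the scipy-like signature
# `approx_derivative(func, x0, ...)`: for f(x) = x[0]**2 + x[1]**2 the estimated
# gradient at (1, 2) should be close to (2, 4).
toy_grad = approx_derivative(lambda x: float(np.sum(x**2)), np.array([1.0, 2.0]))
logger.debug("Finite-difference sanity check: %s", toy_grad)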


# Next, we make our objective function. Here we wrap our own `my_minimise_length`,
# which mirrors the signature of the ready-made `minimise_length` imported above.
# NOTE: It includes numerical calculation of the objective function gradient via
# `approx_derivative`, and nlopt expects exactly this signature.
objective = OptimisationObjective(
    my_minimise_length,
    f_objective_args={"parameterisation": parameterisation_1},
)
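
# A minimal alternative sketch, assuming the ready-made `minimise_length` imported
# above shares this signature (it skips the simvue metric logging in our custom
# version; `ready_made_objective` is illustrative and not used further):
ready_made_objective = OptimisationObjective(
    minimise_length,
    f_objective_args={"parameterisation": parameterisation_1},
)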


# Finally, we initialise our `GeometryOptimisationProblem` and run it.
logger.info("Call optimiser")
my_problem = GeometryOptimisationProblem(parameterisation_1, slsqp_optimiser, objective)
my_problem.optimise()


# Here we're minimising the length, within the bounds of our PrincetonD
# parameterisation, so we'd expect that x1 goes to its upper bound, and x2 goes to
# its lower bound.
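# A quick check of that expectation (an illustrative addition, not part of the
# original example): read the optimised values back from the parameterisation and
# record them before closing the run.
x1_final = float(parameterisation_1.variables["x1"].value)
x2_final = float(parameterisation_1.variables["x2"].value)
logger.info("Optimised x1 = %s (upper bound %s)", x1_final, x1_upper)
logger.info("Optimised x2 = %s (lower bound %s)", x2_final, x2_lower)
run.update_metadata({"x1_final": x1_final, "x2_final": x2_final})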
run.save("bluemira_simvue_geometry_optimisation.py", "code")
run.close()