|  | 
| 1 | 1 | from __future__ import annotations | 
| 2 |  | -from collections.abc import Iterable, Sequence | 
|  | 2 | +from collections.abc import Callable, Iterable, Sequence | 
| 3 | 3 | import copy | 
|  | 4 | +from dataclasses import dataclass, field | 
| 4 | 5 | from functools import cache | 
| 5 | 6 | from pathlib import Path | 
| 6 | 7 | import math | 
| 7 | 8 | from numbers import Integral, Real | 
| 8 | 9 | import random | 
| 9 | 10 | import re | 
| 10 | 11 | from tempfile import NamedTemporaryFile, TemporaryDirectory | 
|  | 12 | +from typing import Any, Protocol | 
| 11 | 13 | import warnings | 
| 12 | 14 | 
 | 
| 13 | 15 | import h5py | 
| 14 | 16 | import lxml.etree as ET | 
| 15 | 17 | import numpy as np | 
|  | 18 | +from scipy.optimize import curve_fit | 
| 16 | 19 | 
 | 
| 17 | 20 | import openmc | 
| 18 | 21 | import openmc._xml as xml | 
|  | 
| 24 | 27 | from openmc.utility_funcs import change_directory | 
| 25 | 28 | 
 | 
| 26 | 29 | 
 | 
|  | 30 | +# Protocol for a function that is passed to keff_search | 
|  | 31 | +class ModelModifier(Protocol): | 
|  | 32 | +    def __call__(self, val: float, **kwargs: Any) -> None: | 
|  | 33 | +        ... | 
|  | 34 | + | 
|  | 35 | + | 
| 27 | 36 | class Model: | 
| 28 | 37 |     """Model container. | 
| 29 | 38 | 
 | 
| @@ -2196,3 +2205,262 @@ def _replace_infinity(value): | 
| 2196 | 2205 | 
 | 
| 2197 | 2206 |         # Take a wild guess as to how many rays are needed | 
| 2198 | 2207 |         self.settings.particles = 2 * int(max_length) | 
|  | 2208 | + | 
|  | 2209 | +    def keff_search( | 
|  | 2210 | +        self, | 
|  | 2211 | +        func: ModelModifier, | 
|  | 2212 | +        x0: float, | 
|  | 2213 | +        x1: float, | 
|  | 2214 | +        target: float = 1.0, | 
|  | 2215 | +        k_tol: float = 1e-4, | 
|  | 2216 | +        sigma_final: float = 3e-4, | 
|  | 2217 | +        p: float = 0.5, | 
|  | 2218 | +        q: float = 0.95, | 
|  | 2219 | +        memory: int = 4, | 
|  | 2220 | +        x_min: float | None = None, | 
|  | 2221 | +        x_max: float | None = None, | 
|  | 2222 | +        b0: int | None = None, | 
|  | 2223 | +        b_min: int = 20, | 
|  | 2224 | +        b_max: int | None = None, | 
|  | 2225 | +        maxiter: int = 50, | 
|  | 2226 | +        output: bool = False, | 
|  | 2227 | +        func_kwargs: dict[str, Any] | None = None, | 
|  | 2228 | +        run_kwargs: dict[str, Any] | None = None, | 
|  | 2229 | +    ) -> SearchResult: | 
|  | 2230 | +        r"""Perform a keff search on a model parametrized by a single variable. | 
|  | 2231 | +
 | 
|  | 2232 | +        This method uses the GRsecant method described in a paper by `Price and | 
|  | 2233 | +        Roskoff <https://doi.org/10.1016/j.pnucene.2023.104731>`_. The GRsecant | 
|  | 2234 | +        method is a modification of the secant method that accounts for | 
|  | 2235 | +        uncertainties in the function evaluations. The method uses a weighted | 
|  | 2236 | +        linear fit of the most recent function evaluations to predict the next | 
|  | 2237 | +        point to evaluate. It also adaptively changes the number of batches to | 
|  | 2238 | +        meet the target uncertainty value at each iteration. | 
|  | 2239 | +
 | 
|  | 2240 | +        The target uncertainty for iteration :math:`n+1` is determined by the | 
|  | 2241 | +        following equation (following Eq. (8) in the paper): | 
|  | 2242 | +
 | 
|  | 2243 | +        .. math:: | 
|  | 2244 | +            \sigma_{n+1} = q \sigma_\text{final} \left ( \frac{ \min \left \{ | 
|  | 2245 | +            \left\lvert k_i - k_\text{target} \right\rvert : i=0,1,\dots,n | 
|  | 2246 | +            \right \} }{k_\text{tol}} \right )^p | 
|  | 2247 | +
 | 
|  | 2248 | +        where :math:`q` is a multiplicative factor less than 1, given as the | 
|  | 2249 | +        ``q`` parameter below. | 
|  | 2250 | +
 | 
|  | 2251 | +        Parameters | 
|  | 2252 | +        ---------- | 
|  | 2253 | +        func : ModelModifier | 
|  | 2254 | +            Function that takes the search parameter as its first argument and | 
|  | 2255 | +            modifies the model accordingly. | 
|  | 2256 | +        x0 : float | 
|  | 2257 | +            First guess for the parameter passed to `func` | 
|  | 2258 | +        x1 : float | 
|  | 2259 | +            Second guess for the parameter passed to `func` | 
|  | 2260 | +        target : float, optional | 
|  | 2261 | +            keff value to search for | 
|  | 2262 | +        k_tol : float, optional | 
|  | 2263 | +            Stopping criterion on the function value ``keff - target``; its | 
|  | 2264 | +            absolute value must be at most ``k_tol`` for convergence. | 
|  | 2265 | +        sigma_final : float, optional | 
|  | 2266 | +            Maximum accepted k-effective uncertainty for the stopping criterion. | 
|  | 2267 | +        p : float, optional | 
|  | 2268 | +            Exponent used in the target-uncertainty equation above. | 
|  | 2269 | +        q : float, optional | 
|  | 2270 | +            Multiplicative factor used in the target-uncertainty equation above. | 
|  | 2271 | +        memory : int, optional | 
|  | 2272 | +            Number of most-recent points used in the weighted linear fit of | 
|  | 2273 | +            ``f(x) = a + b x`` to predict the next point. | 
|  | 2274 | +        x_min : float, optional | 
|  | 2275 | +            Minimum allowed value for the parameter ``x``. | 
|  | 2276 | +        x_max : float, optional | 
|  | 2277 | +            Maximum allowed value for the parameter ``x``. | 
|  | 2278 | +        b0 : int, optional | 
|  | 2279 | +            Number of active batches to use for the initial function | 
|  | 2280 | +            evaluations. If None, uses the model's current setting. | 
|  | 2281 | +        b_min : int, optional | 
|  | 2282 | +            Minimum number of active batches to use in a function evaluation. | 
|  | 2283 | +        b_max : int, optional | 
|  | 2284 | +            Maximum number of active batches to use in a function evaluation. | 
|  | 2285 | +        maxiter : int, optional | 
|  | 2286 | +            Maximum number of iterations to perform. | 
|  | 2287 | +        output : bool, optional | 
|  | 2288 | +            Whether or not to display output showing iteration progress. | 
|  | 2289 | +        func_kwargs : dict, optional | 
|  | 2290 | +            Keyword-based arguments to pass to the `func` function. | 
|  | 2291 | +        run_kwargs : dict, optional | 
|  | 2292 | +            Keyword arguments to pass to :meth:`openmc.Model.run` or | 
|  | 2293 | +            :meth:`openmc.lib.run`. | 
|  | 2294 | +
 | 
|  | 2295 | +        Returns | 
|  | 2296 | +        ------- | 
|  | 2297 | +        SearchResult | 
|  | 2298 | +            Result object containing the estimated root (parameter value) and | 
|  | 2299 | +            evaluation history (parameters, means, standard deviations, and | 
|  | 2300 | +            batches), plus convergence status and termination reason. | 
|  | 2301 | +
 | 
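|  |  | +        Examples | 
|  |  | +        -------- | 
|  |  | +        A minimal, illustrative call. Here ``set_fuel_radius`` is a | 
|  |  | +        hypothetical user-defined modifier; any callable that updates the | 
|  |  | +        model from a single float can be used:: | 
|  |  | +
|  |  | +            def set_fuel_radius(r): | 
|  |  | +                ...  # update the model geometry based on r | 
|  |  | +
|  |  | +            result = model.keff_search(set_fuel_radius, 0.40, 0.45) | 
|  |  | +            if result.converged: | 
|  |  | +                print(result.root, result.function_calls) | 
|  |  | +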
|  | 2302 | +        """ | 
|  | 2303 | +        import openmc.lib | 
|  | 2304 | + | 
|  | 2305 | +        check_type('model modifier', func, Callable) | 
|  | 2306 | +        check_type('target', target, Real) | 
|  | 2307 | +        if memory < 2: | 
|  | 2308 | +            raise ValueError("memory must be ≥ 2") | 
|  | 2309 | +        func_kwargs = {} if func_kwargs is None else dict(func_kwargs) | 
|  | 2310 | +        run_kwargs = {} if run_kwargs is None else dict(run_kwargs) | 
|  | 2311 | +        run_kwargs.setdefault('output', False) | 
|  | 2312 | + | 
|  | 2313 | +        # Create lists to store the history of evaluations | 
|  | 2314 | +        xs: list[float] = [] | 
|  | 2315 | +        fs: list[float] = [] | 
|  | 2316 | +        ss: list[float] = [] | 
|  | 2317 | +        gs: list[int] = [] | 
|  | 2318 | +        count = 0 | 
|  | 2319 | + | 
|  | 2320 | +        # Helper function to evaluate f and store results | 
|  | 2321 | +        def eval_at(x: float, batches: int) -> tuple[float, float]: | 
|  | 2322 | +            # Modify the model with the current guess | 
|  | 2323 | +            func(x, **func_kwargs) | 
|  | 2324 | + | 
|  | 2325 | +            # Change the number of batches and run the model | 
|  | 2326 | +            batches += self.settings.inactive | 
|  | 2327 | +            if openmc.lib.is_initialized: | 
|  | 2328 | +                openmc.lib.settings.set_batches(batches) | 
|  | 2329 | +                openmc.lib.reset() | 
|  | 2330 | +                openmc.lib.run(**run_kwargs) | 
|  | 2331 | +                sp_filepath = f'statepoint.{batches}.h5' | 
|  | 2332 | +            else: | 
|  | 2333 | +                self.settings.batches = batches | 
|  | 2334 | +                sp_filepath = self.run(**run_kwargs) | 
|  | 2335 | + | 
|  | 2336 | +            # Extract keff and its uncertainty | 
|  | 2337 | +            with openmc.StatePoint(sp_filepath) as sp: | 
|  | 2338 | +                keff = sp.keff | 
|  | 2339 | + | 
|  | 2340 | +            if output: | 
|  | 2341 | +                nonlocal count | 
|  | 2342 | +                count += 1 | 
|  | 2343 | +                print(f'Iteration {count}: {batches=}, {x=:.6g}, {keff=:.5f}') | 
|  | 2344 | + | 
|  | 2345 | +            xs.append(float(x)) | 
|  | 2346 | +            fs.append(float(keff.n - target)) | 
|  | 2347 | +            ss.append(float(keff.s)) | 
|  | 2348 | +            gs.append(int(batches - self.settings.inactive))  # active batches | 
|  | 2349 | +            return fs[-1], ss[-1] | 
|  | 2350 | + | 
|  | 2351 | +        # Default b0 to current model settings if not explicitly provided | 
|  | 2352 | +        if b0 is None: | 
|  | 2353 | +            b0 = self.settings.batches - self.settings.inactive | 
|  | 2354 | + | 
|  | 2355 | +        # Perform the search (inlined GRsecant) in a temporary directory | 
|  | 2356 | +        with TemporaryDirectory() as tmpdir: | 
|  | 2357 | +            if not openmc.lib.is_initialized: | 
|  | 2358 | +                run_kwargs.setdefault('cwd', tmpdir) | 
|  | 2359 | + | 
|  | 2360 | +            # ---- Seed with two evaluations | 
|  | 2361 | +            f0, s0 = eval_at(x0, b0) | 
|  | 2362 | +            if abs(f0) <= k_tol and s0 <= sigma_final: | 
|  | 2363 | +                return SearchResult(x0, xs, fs, ss, gs, True, "converged") | 
|  | 2364 | +            f1, s1 = eval_at(x1, b0) | 
|  | 2365 | +            if abs(f1) <= k_tol and s1 <= sigma_final: | 
|  | 2366 | +                return SearchResult(x1, xs, fs, ss, gs, True, "converged") | 
|  | 2367 | + | 
|  | 2368 | +            for _ in range(maxiter - 2): | 
|  | 2369 | +                # ------ Step 1: propose next x via GRsecant | 
|  | 2370 | +                m = min(memory, len(xs)) | 
|  | 2371 | + | 
|  | 2372 | +                # Perform a curve fit on f(x) = a + bx accounting for | 
|  | 2373 | +                # uncertainties. This is equivalent to minimizing the function | 
|  | 2374 | +                # in Equation (A.14) | 
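|  |  | +                # (with absolute_sigma=True, curve_fit minimizes the weighted | 
|  |  | +                # sum of squares sum_i ((f_i - (a + b*x_i)) / s_i)**2) | 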
|  | 2375 | +                (a, b), _ = curve_fit( | 
|  | 2376 | +                    lambda x, a, b: a + b*x, | 
|  | 2377 | +                    xs[-m:], fs[-m:], sigma=ss[-m:], absolute_sigma=True | 
|  | 2378 | +                ) | 
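|  |  | +                # Propose the root of the fitted line: a + b*x = 0  =>  x = -a/b | 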
|  | 2379 | +                x_new = float(-a / b) | 
|  | 2380 | + | 
|  | 2381 | +                # Clamp x_new to the bounds if provided | 
|  | 2382 | +                if x_min is not None: | 
|  | 2383 | +                    x_new = max(x_new, x_min) | 
|  | 2384 | +                if x_max is not None: | 
|  | 2385 | +                    x_new = min(x_new, x_max) | 
|  | 2386 | + | 
|  | 2387 | +                # ------ Step 2: choose target σ for next run (Eq. 8 + clamp) | 
|  | 2388 | + | 
|  | 2389 | +                min_abs_f = float(np.min(np.abs(fs))) | 
|  | 2390 | +                base = q * sigma_final | 
|  | 2391 | +                ratio = min_abs_f / k_tol if k_tol > 0 else 1.0 | 
|  | 2392 | +                sig = base * (ratio ** p) | 
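|  |  | +                # Never request an uncertainty tighter than q*sigma_final, the | 
|  |  | +                # margin used for the final convergence check | 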
|  | 2393 | +                sig_target = max(sig, base) | 
|  | 2394 | + | 
|  | 2395 | +                # ------ Step 3: choose generations to hit σ_target (Appendix C) | 
|  | 2396 | + | 
|  | 2397 | +                # Only fit if there are at least two points with distinct batch counts | 
|  | 2398 | +                if len(gs) >= 2 and np.var(np.log(gs)) > 0.0: | 
|  | 2399 | +                    # Perform a curve fit based on Eq. (C.3) to solve for ln(k). | 
|  | 2400 | +                    # Note that unlike in the paper, we do not leave r as an | 
|  | 2401 | +                    # undetermined parameter and choose r=0.5. | 
|  | 2402 | +                    (ln_k,), _ = curve_fit( | 
|  | 2403 | +                        lambda ln_b, ln_k: ln_k - 0.5*ln_b, | 
|  | 2404 | +                        np.log(gs[-4:]), np.log(ss[-4:]), | 
|  | 2405 | +                    ) | 
|  | 2406 | +                    k = float(np.exp(ln_k)) | 
|  | 2407 | +                else: | 
|  | 2408 | +                    k = float(ss[-1] * math.sqrt(gs[-1])) | 
|  | 2409 | + | 
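|  |  | +                # Invert sigma ~ k / sqrt(batches) to estimate how many batches | 
|  |  | +                # are needed to reach the target uncertainty | 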
|  | 2410 | +                b_new = (k / sig_target) ** 2 | 
|  | 2411 | + | 
|  | 2412 | +                # Clamp and round up to integer | 
|  | 2413 | +                b_new = max(b_min, math.ceil(b_new)) | 
|  | 2414 | +                if b_max is not None: | 
|  | 2415 | +                    b_new = min(b_new, b_max) | 
|  | 2416 | + | 
|  | 2417 | +                # Evaluate at proposed x with batches determined above | 
|  | 2418 | +                f_new, s_new = eval_at(x_new, b_new) | 
|  | 2419 | + | 
|  | 2420 | +                # Termination based on both criteria (|f| and σ) | 
|  | 2421 | +                if abs(f_new) <= k_tol and s_new <= sigma_final: | 
|  | 2422 | +                    return SearchResult(x_new, xs, fs, ss, gs, True, "converged") | 
|  | 2423 | + | 
|  | 2424 | +            return SearchResult(xs[-1], xs, fs, ss, gs, False, "maxiter") | 
|  | 2425 | + | 
|  | 2426 | + | 
|  | 2427 | +@dataclass | 
|  | 2428 | +class SearchResult: | 
|  | 2429 | +    """Result of a GRsecant keff search. | 
|  | 2430 | +
 | 
|  | 2431 | +    Attributes | 
|  | 2432 | +    ---------- | 
|  | 2433 | +    root : float | 
|  | 2434 | +        Estimated parameter value where f(x) = 0 at termination. | 
|  | 2435 | +    parameters : list[float] | 
|  | 2436 | +        Parameter values (x) evaluated during the search, in order. | 
|  | 2437 | +    means : list[float] | 
|  | 2438 | +        Mean values of ``keff - target`` for each evaluation. | 
|  | 2439 | +    stdevs : list[float] | 
|  | 2440 | +        One-sigma uncertainties of keff for each evaluation. | 
|  | 2441 | +    batches : list[int] | 
|  | 2442 | +        Number of active batches used for each evaluation. | 
|  | 2443 | +    converged : bool | 
|  | 2444 | +        Whether both |f| <= k_tol and sigma <= sigma_final were met. | 
|  | 2445 | +    flag : str | 
|  | 2446 | +        Reason for termination (e.g., "converged", "maxiter"). | 
|  | 2447 | +    """ | 
|  | 2448 | +    root: float | 
|  | 2449 | +    parameters: list[float] = field(repr=False) | 
|  | 2450 | +    means: list[float] = field(repr=False) | 
|  | 2451 | +    stdevs: list[float] = field(repr=False) | 
|  | 2452 | +    batches: list[int] = field(repr=False) | 
|  | 2453 | +    converged: bool | 
|  | 2454 | +    flag: str | 
|  | 2455 | + | 
|  | 2456 | +    @property | 
|  | 2457 | +    def function_calls(self) -> int: | 
|  | 2458 | +        """Number of function evaluations performed.""" | 
|  | 2459 | +        return len(self.parameters) | 
|  | 2460 | + | 
|  | 2461 | +    @property | 
|  | 2462 | +    def total_batches(self) -> int: | 
|  | 2463 | +        """Total number of active batches used across all evaluations.""" | 
|  | 2464 | +        return sum(self.batches) | 
|  | 2465 | + | 
|  | 2466 | + | 