diff --git a/CHANGELOG.md b/CHANGELOG.md index 6cd8fbb7..568512ed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,12 @@ # Change log +## [v2.0.0-rc1](https://github.com/simvue-io/client/releases/tag/v2.0.0rc1) - 2025-03-06 +* Add new example notebooks +* Update and refactor examples to work with v2.0 +* Fix bug in offline artifacts using wrong file path +* Change names of sustainability metrics +* Fix `Self` being used in typing Generators so that Simvue works with Python 3.10 in Conda + ## [v2.0.0-alpha3](https://github.com/simvue-io/client/releases/tag/v2.0.0a3) - 2025-03-04 * Updated codecarbon to work with new API * Codecarbon now works with offline mode diff --git a/CITATION.cff b/CITATION.cff index 3fc71db0..c3ea9b9f 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -42,9 +42,9 @@ keywords: - alerting - simulation license: Apache-2.0 -commit: 64ff8a5344232d44fc7da5b6ff601d3023497977 -version: 2.0.0a3 -date-released: '2025-03-04' +commit: effbd2e88fa12a181bf33721eae599d4245e1484 +version: 2.0.0rc1 +date-released: '2025-03-06' references: - title: mlco2/codecarbon version: v2.8.2 diff --git a/README.md b/README.md index 974a361e..56538767 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ Collect metadata, metrics and artifacts from simulations, processing and AI/ML t
- + diff --git a/examples/Bluemira/README.md b/examples/Bluemira/README.md new file mode 100644 index 00000000..7420fd0f --- /dev/null +++ b/examples/Bluemira/README.md @@ -0,0 +1,21 @@ +# Geometry optimisation using Bluemira + +[Bluemira](https://github.com/Fusion-Power-Plant-Framework/bluemira) is an integrated inter-disciplinary design tool for future fusion reactors. It incorporates several modules, some of which rely on other codes, to carry out a range of typical conceptual fusion reactor design activities. This example uses Simvue to track the optimisation of the geometry of a Princeton-D shaped magnet, while maintaining a safe minimum distance to the plasma of 0.5m. + + +To run this example, you will need to install Bluemira. For details of installation of Bluemira please refer to https://bluemira.readthedocs.io/en/develop/installation.html +
+Once you have Bluemira installed and are running the `bluemira` conda environment (or similar), install Simvue with the plotting extras: +``` +pip install simvue[plot] +``` +Then move into the example's directory: +``` +cd examples/Bluemira +``` +Make a simvue.toml file - click Create New Run on the web UI, copy the contents listed, and paste into a config file. + +Finally, run the example: +``` +python geometry_optimisation.py +``` diff --git a/examples/Bluemira/geometry_optimisation.py b/examples/Bluemira/geometry_optimisation.py new file mode 100644 index 00000000..c996f191 --- /dev/null +++ b/examples/Bluemira/geometry_optimisation.py @@ -0,0 +1,197 @@ +# bluemira is an integrated inter-disciplinary design tool for future fusion +# reactors. It incorporates several modules, some of which rely on other +# codes, to carry out a range of typical conceptual fusion reactor design +# activities. +# +# Copyright (C) 2021 M. Coleman, J. Cook, F. Franza, I.A. Maione, S. McIntosh, +# J. Morris, D. 
Short +# +# bluemira is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# bluemira is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with bluemira; if not, see . +""" +Geometry Optimisation + +Example taken from: bluemira/examples/optimisation/geometry_optimisation.ex.py + +In this example we will go through how to set up a simple geometry +optimisation, including a geometric constraint. + +The problem to solve is, minimise the length of our wall boundary, +in the xz-plane, whilst keeping it a minimum distance from our plasma. + +We will greatly simplify this problem by working with a circular +plasma, we will use a PrincetonD for the wall shape, +and set the minimum distance to half a meter. +""" + +import numpy as np +import os +import sys +from bluemira.display import plot_2d +from bluemira.display.plotter import PlotOptions +from bluemira.geometry.optimisation import optimise_geometry +from bluemira.geometry.parameterisations import GeometryParameterisation, PrincetonD +from bluemira.geometry.tools import distance_to, make_circle +from bluemira.geometry.wire import BluemiraWire + +import simvue + +def f_objective(geom: GeometryParameterisation) -> float: + """Objective function to minimise a shape's length.""" + return geom.create_shape().length + + +def distance_constraint( + geom: GeometryParameterisation, boundary: BluemiraWire, min_distance: float, run: simvue.Run +) -> float: + """ + A constraint to keep a minimum distance between two shapes. 
+ + The constraint must be in the form f(x) <= 0, i.e., constraint + is satisfied if f(x) <= 0. + + Since what we want is 'min_distance <= distance(A, B)', we rewrite + this in the form 'min_distance - distance(A, B) <= 0', and return + the left-hand side from this function. + """ + shape = geom.create_shape() + # Log all variables as metrics after each iteration, giving human readable names: + run.log_metrics( + { + "inboard_limb_radius": float(geom.variables["x1"].value), + "outboard_limb_radius": float(geom.variables["x2"].value), + "vertical_offset": float(geom.variables["dz"].value), + "length_of_wall": float(shape.length), + "distance_to_plasma": float(distance_to(shape, boundary)[0]) + } + ) + return min_distance - distance_to(shape, boundary)[0] + +# The original example prints stuff to the console to track progress +# Instead of changing these lines to log events (since we probably want both), +# We can make a class which intercepts stdout and also sends messages to Simvue +class StdoutToSimvue(): + def __init__(self, run: simvue.Run): + self.run = run + + def write(self, message: str): + # Log the message as an event (so long as it isnt a blank line) + if message.strip(): + run.log_event(message) + # And print to console as normal + sys.__stdout__.write(message) + + def flush(self): + sys.__stdout__.flush() + +# Here we will start doing our optimisation. 
First create a Simvue run, +# using the Run class as a context manager: +with simvue.Run() as run: + # Initialise our run: + run.init( + name="bluemira_geometry_optimisation", + folder="/simvue_client_demos", + visibility="tenant" if os.environ.get("CI") else None, + tags=["bluemira", "simvue_client_examples"], + description="Minimise the length of a parameterised geometry using gradient-based optimisation algorithm.", + ) + + # Redirect stdout so that print statements also get logged as events: + stdout_sender = StdoutToSimvue(run) + sys.stdout = stdout_sender + + # Next define the shape of our plasma, and the minimum distance we want between + # our wall boundary and our plasma: + min_distance = 0.5 + plasma = make_circle(radius=2, center=(8, 0, 0.25), axis=(0, 1, 0)) + + # As with any optimisation, it's important to pick a reasonable initial + # parameterisation. + wall_boundary = PrincetonD({ + "x1": {"value": 4, "upper_bound": 6}, + "x2": {"value": 12, "lower_bound": 10}, + }) + + print("Initial parameterisation:") + print(wall_boundary.variables) + print(f"Length of wall : {wall_boundary.create_shape().length}") + print(f"Distance to plasma: {distance_to(wall_boundary.create_shape(), plasma)[0]}") + + # Create metadata for our original parameters: + _metadata = { + var: { + "initial": wall_boundary.variables[var].value, + "lower_bound": wall_boundary.variables[var].lower_bound, + "upper_bound": wall_boundary.variables[var].upper_bound + } + for var in ["x1", "x2", "dz"] + } + run.update_metadata({"bluemira_parameters": _metadata}) + + # Create and upload an image of the initial design to Simvue + _plot = plot_2d([wall_boundary.create_shape(), plasma]) + _fig = _plot.get_figure() + run.save_object(_fig, category="input", name="initial_shape") + + # Optimise our geometry using a gradient descent method + result = optimise_geometry( + wall_boundary, + algorithm="SLSQP", + f_objective=f_objective, + opt_conditions={"ftol_abs": 1e-6}, + keep_history=True, + 
ineq_constraints=[ + { + "f_constraint": lambda g: distance_constraint(g, plasma, min_distance, run), + "tolerance": np.array([1e-8]), + }, + ], + ) + + # Print final results after optimisation + print("Optimised parameterisation:") + print(result.geom.variables) + + boundary = result.geom.create_shape() + print(f"Length of wall : {boundary.length}") + print(f"Distance to plasma: {distance_to(boundary, plasma)[0]}") + + # Update metadata with final optimised values + _metadata = { + var: { + "final": result.geom.variables[var].value, + } + for var in ["x1", "x2", "dz"] + } + run.update_metadata({"bluemira_parameters": _metadata}) + + # Create and upload an image of the optimised design to Simvue + _plot = plot_2d([boundary, plasma]) + _fig = _plot.get_figure() + run.save_object(_fig, category="output", name="final_shape") + + # Use the history to create and upload an image of the design iterations + geom = PrincetonD() + ax = plot_2d(plasma, show=False) + for i, (x, _) in enumerate(result.history): + geom.variables.set_values_from_norm(x) + wire = geom.create_shape() + wire_options = { + "alpha": 0.5 + ((i + 1) / len(result.history)) / 2, + "color": "red", + "linewidth": 0.1, + } + ax = plot_2d(wire, options=PlotOptions(wire_options=wire_options), ax=ax, show=False) + _plot = plot_2d(boundary, ax=ax, show=True) + _fig = _plot.get_figure() + run.save_object(_fig, category="output", name="design_iterations") \ No newline at end of file diff --git a/examples/FDS/activate_vents.fds b/examples/FDS/activate_vents.fds deleted file mode 100644 index 0245c7e4..00000000 --- a/examples/FDS/activate_vents.fds +++ /dev/null @@ -1,56 +0,0 @@ -&HEAD CHID='activate_vents', TITLE='Test of VENT activation/deactivation' / - -&MESH IJK=21,10,10, XB=0.0,2.1,0.0,1.0,0.0,1.0 / - -&TIME DT=0.05, T_END=20. 
/ - -&SURF ID='BLOW 1', VEL=-0.2, COLOR='PURPLE', PART_ID='TRACER 1' / -&SURF ID='BLOW 2', VEL=-0.2, COLOR='RED', PART_ID='TRACER 2' / -&SURF ID='BLOW 3', VEL=-0.2, COLOR='ORANGE', PART_ID='TRACER 3' / -&SURF ID='BLOW 4', VEL=-0.2, COLOR='YELLOW', PART_ID='TRACER 4' / -&SURF ID='BLOW 5', VEL=-0.2, COLOR='GREEN', PART_ID='TRACER 5' / -&SURF ID='BLOW 6', VEL=-0.2, COLOR='CYAN', PART_ID='TRACER 6' / -&SURF ID='BLOW 7', VEL=-0.2, COLOR='BLUE', PART_ID='TRACER 7' / - -&PART ID='TRACER 1', MASSLESS=.TRUE., COLOR='PURPLE' / -&PART ID='TRACER 2', MASSLESS=.TRUE., COLOR='RED' / -&PART ID='TRACER 3', MASSLESS=.TRUE., COLOR='ORANGE' / -&PART ID='TRACER 4', MASSLESS=.TRUE., COLOR='YELLOW' / -&PART ID='TRACER 5', MASSLESS=.TRUE., COLOR='GREEN' / -&PART ID='TRACER 6', MASSLESS=.TRUE., COLOR='CYAN' / -&PART ID='TRACER 7', MASSLESS=.TRUE., COLOR='BLUE', DEVC_ID='timer 7b' / / - -&VENT XB=0.10,0.20,0.40,0.60,0.00,0.00, SURF_ID='BLOW 1', COLOR='PURPLE', CTRL_ID='controller 1' / -&VENT XB=0.40,0.50,0.40,0.60,0.00,0.00, SURF_ID='BLOW 2', COLOR='RED', DEVC_ID='timer 2' / -&VENT XB=0.70,0.80,0.40,0.60,0.00,0.00, SURF_ID='BLOW 3', COLOR='ORANGE', CTRL_ID='controller 3' / -&VENT XB=1.00,1.10,0.40,0.60,0.00,0.00, SURF_ID='BLOW 4', COLOR='YELLOW', CTRL_ID='controller 4' / -&VENT XB=1.30,1.40,0.40,0.60,0.00,0.00, SURF_ID='BLOW 5', COLOR='GREEN', DEVC_ID='timer 5' / -&VENT XB=1.60,1.70,0.40,0.60,0.00,0.00, SURF_ID='BLOW 6', COLOR='CYAN', DEVC_ID='timer 6' / -&VENT XB=1.90,2.00,0.40,0.60,0.00,0.00, SURF_ID='BLOW 7', COLOR='BLUE', DEVC_ID='timer 7' / - -&DEVC XYZ=0.1,0.1,0.1, ID='clock 1', QUANTITY='TIME' / -&DEVC XYZ=0.1,0.1,0.1, ID='timer 2', QUANTITY='TIME', SETPOINT= 5.0 / -&DEVC XYZ=0.1,0.1,0.1, ID='timer 5', QUANTITY='TIME', SETPOINT= 5.0 / -&DEVC XYZ=0.1,0.1,0.1, ID='timer 6', QUANTITY='TIME', SETPOINT= 6.0 / -&DEVC XYZ=0.1,0.1,0.1, ID='timer 7', QUANTITY='TIME', SETPOINT= 7.0 / -&DEVC XYZ=0.1,0.1,0.1, ID='timer 7b', QUANTITY='TIME', SETPOINT=11.0 / - -&CTRL ID='controller 1', 
FUNCTION_TYPE='CUSTOM', INPUT_ID='clock 1', RAMP_ID='ramp 1' / -&RAMP ID='ramp 1', T= 0.00, F=-1. / -&RAMP ID='ramp 1', T= 2.99, F=-1. / -&RAMP ID='ramp 1', T= 3.01, F= 1. / -&RAMP ID='ramp 1', T= 5.99, F= 1. / -&RAMP ID='ramp 1', T= 6.01, F=-1. / -&RAMP ID='ramp 1', T=11.99, F=-1. / -&RAMP ID='ramp 1', T=12.01, F= 1. / - -&CTRL ID='controller 3', FUNCTION_TYPE='TIME_DELAY', INPUT_ID='timer 2', DELAY=3. / -&CTRL ID='controller 4', FUNCTION_TYPE='ALL', INPUT_ID='controller 1','controller 3' / - -&VENT MB='XMIN', SURF_ID='OPEN' / -&VENT MB='XMAX', SURF_ID='OPEN' / -&VENT MB='YMIN', SURF_ID='OPEN' / -&VENT MB='YMAX', SURF_ID='OPEN' / -&VENT MB='ZMAX', SURF_ID='OPEN' / - -&TAIL / diff --git a/examples/FDS/fds_unlim b/examples/FDS/fds_unlim deleted file mode 100755 index 78b22dd9..00000000 --- a/examples/FDS/fds_unlim +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/bash - -ULIMIT_VALUE="" - -function help() { - echo "Usage: $0 [--ulimit value] [--help] FDS_INPUT_FILE" - echo "Options:" - echo " --ulimit value Set ulimit value" - echo " --help Display this message" -} - -args=$(getopt -o '' --long ulimit:,help -n "$0" -- "$@") -eval set -- "$args" - -while true; do - case "$1" in - --ulimit) - ULIMIT_VALUE=$2 - shift 2 - ;; - --help) - help - exit 0 - ;; - --) - shift - break - ;; - *) - echo "Unrecognised option '$1'" - help - exit 1 - ;; - esac -done - -if [ $# -eq 0 ]; then - echo "No input file provided." 
- help - exit 1 -fi - -if [ -z "$ULIMIT_VALUE" ]; then - fds $1 -else - ulimit -s $ULIMIT_VALUE && fds $1 -fi diff --git a/examples/FDS/minimal_fds.py b/examples/FDS/minimal_fds.py deleted file mode 100644 index 24a28d27..00000000 --- a/examples/FDS/minimal_fds.py +++ /dev/null @@ -1,87 +0,0 @@ -import os.path -import os -import logging -import datetime -import multiprocessing -import click - -from multiparser import FileMonitor -import multiparser.parsing.tail as mp_tail_parse -from simvue import Run - - -@click.command -@click.argument("input_file") -@click.argument("tracking_directory") -@click.option("--ci", is_flag=True, default=False) -def run_fds_example(input_file: str, tracking_directory: str, ci: bool) -> None: - logging.getLogger().setLevel(logging.DEBUG) - _trigger = multiprocessing.Event() - - with Run() as run: - - def debug_callback(data, meta, run_instance: Run = run): - data = {k.strip(): v.strip() for k, v in data.items()} - out_data = {} - if "Value" not in data: - return - - key = data["ID"].replace(" ", "_").strip() - value = float(data["Value"]) - time = datetime.datetime.fromtimestamp(float(data["Time (s)"])).strftime( - "%Y-%m-%d %H:%M:%S.%f" - ) - out_data[key] = value - - print(f"Recorded: {out_data}\n{meta}") - run_instance.log_metrics(out_data, timestamp=time) - - def meta_update(data, meta, run_instance: Run = run): - print(f"Received '{meta}'\n\n'{data}'") - run_instance.update_metadata(metadata={k: v for k, v in data.items() if v}) - - run.init( - "fire_simulator_demo", - folder="/simvue_client_demos", - tags=["FDS"], - description="Vent activation demo in FDS", - retention_period="1 hour" if ci else None, - visibility="tenant" if ci else None, - ) - - run.add_process( - "simulation", - executable="fds_unlim", - ulimit="unlimited", - input_file=f"{input_file}", - completion_trigger=_trigger, - print_stdout=True, - env=os.environ - | {"PATH": f"{os.environ['PATH']}:{os.path.dirname(__file__)}"}, - ) - - with FileMonitor( - 
per_thread_callback=debug_callback, - exception_callback=run.log_event, - interval=1, - log_level=logging.DEBUG, - flatten_data=True, - plain_logging=True, - termination_trigger=_trigger, - ) as monitor: - monitor.track( - path_glob_exprs=input_file, - callback=meta_update, - file_type="fortran", - static=True, - ) - monitor.tail( - path_glob_exprs=os.path.join(tracking_directory, "*_devc*.csv"), - parser_func=mp_tail_parse.record_csv, - parser_kwargs={"header_pattern": "Time"}, - ) - monitor.run() - - -if __name__ in "__main__": - run_fds_example() diff --git a/examples/FDS/requirements.txt b/examples/FDS/requirements.txt deleted file mode 100644 index 753f2156..00000000 --- a/examples/FDS/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -ukaea-multiparser[fortran] -click diff --git a/examples/Geant4/README.md b/examples/Geant4/README.md new file mode 100644 index 00000000..400e75f3 --- /dev/null +++ b/examples/Geant4/README.md @@ -0,0 +1,41 @@ +# Geant4 Example +[Geant4](https://geant4.web.cern.ch/) is a toolkit for the simulation of the passage of particles through matter. We will use Simvue to track repeating simulations of a proton fired at a target of beryllium, monitoring the yield of key particles of interest. 
+ +To run this example, run the Geant4 docker container: +``` +docker run --rm -it artemisbeta/geant4:11.2.1 +``` +Then clone this repository, using recursive to also clone the Geant4 example submodule: +``` +git clone --recursive https://github.com/simvue-io/python-api.git +``` +Move into the example directory: +``` +cd python-api/examples/Geant4 +``` +Create a virtual environment: +``` +apt install python3.12-venv + +python3 -m venv venv + +source venv/bin/activate +``` +Install requirements: +``` +python3 -m pip install -r requirements.txt +``` +Make a simvue.toml file - click Create New Run on the web UI, copy the contents listed, and paste into a config file using: +``` +vi simvue.toml +``` +Make and build the Geant4 binaries required: +``` +cmake -DCMAKE_PREFIX_PATH=/usr/local/share/geant4/install/4.11.2/ -Bbuild FixedTarget/ + +cmake --build build +``` +And then run the example: +``` +python3 geant4_simvue.py build/MaterialTesting --events 10 +``` \ No newline at end of file diff --git a/examples/Geant4/geant4_simvue.py b/examples/Geant4/geant4_simvue.py index 2a864e0f..17ddb0c7 100644 --- a/examples/Geant4/geant4_simvue.py +++ b/examples/Geant4/geant4_simvue.py @@ -29,33 +29,37 @@ def geant4_simvue_example( g4_binary: str, config: str | None, ci: bool, momentum: float, events: int ) -> None: - @mp_file_parse.file_parser + def root_file_parser( - file_name: str, *_, **__ + input_file: str, *_, **__ ) -> tuple[dict[str, typing.Any], dict[str, typing.Any]]: - with uproot.open(file_name) as root_data: + """ + This function will parse the ROOT file which Geant4 produces as an output, + and format the data as a dictionary of key/value pairs for upload as metrics. 
+ """ + with uproot.open(input_file) as root_data: hit_data: dict[str, uproot.TBranch] if not (hit_data := root_data.get("Hits")): raise RuntimeError("Expected key 'Hits' in ROOT file") - particles_of_interest = [2212, 211, 11, 22, 2112] + particles_of_interest = [2212, 211, 11, 22, 2112] - all_particles = hit_data["fID"].array(library="np").tolist() + all_particles = hit_data["fID"].array(library="np").tolist() - out_data = { - Particle.from_pdgid(abs(identifier)) - .name.replace("+", "plus") - .replace("-", "minus"): [abs(i) for i in all_particles].count( - abs(identifier) - ) - for identifier in particles_of_interest - } - - return {}, out_data - - termination_trigger = multiprocessing.Event() + out_data = { + Particle.from_pdgid(abs(identifier)) + .name.replace("+", "plus") + .replace("-", "minus"): [abs(i) for i in all_particles].count( + abs(identifier) + ) + for identifier in particles_of_interest + } + return out_data + + # Use the Simvue Run as a context manager with simvue.Run() as run: + # Initialize a single run for all simulations we are tracking run.init( "Geant4_simvue_demo", folder="/simvue_client_demos", @@ -68,55 +72,43 @@ def root_file_parser( ) kwargs: dict[str, typing.Any] = {} - if config: kwargs["script"] = config - with tempfile.TemporaryDirectory() as tempd: - with multiparser.FileMonitor( - per_thread_callback=lambda metrics, *_: run.log_metrics(metrics), - exception_callback=run.log_event, - terminate_all_on_fail=False, - plain_logging=True, - flatten_data=True, - termination_trigger=termination_trigger, - ) as monitor: - monitor.track( - path_glob_exprs=[f'{pathlib.Path(tempd).joinpath("*")}'], - parser_func=root_file_parser, - static=True, - ) - monitor.run() - - for i in range(events): - if i % 10 == 0: - click.secho( - f"Running {i+1}/{events} with momentum {momentum} GeV", - bold=True, - fg="cyan", - ) - running_simulation = multiprocessing.Event() - run.add_process( - identifier=f"Geant4_simulation_{momentum}GeV_{i}", - 
executable=g4_binary, - momentum=momentum, - batch=True, - output=pathlib.Path(tempd).joinpath( - f"output_{momentum}GeV_{i+1}.root" - ), - completion_trigger=running_simulation - if i == events - 1 - else None, - **kwargs, - ) - - termination_trigger.set() - - for file in pathlib.Path().cwd().glob("Geant4*.err"): - os.remove(file) - - for file in pathlib.Path().cwd().glob("Geant4*.out"): - os.remove(file) + for i in range(events): + # Create new multiprocessing Trigger which will register when the simulation is complete + _trigger = multiprocessing.Event() + + if i % 10 == 0: + click.secho( + f"Running {i+1}/{events} with momentum {momentum} GeV", + bold=True, + fg="cyan", + ) + _output_file_path = pathlib.Path.cwd().joinpath( + f"Geant4_simvue_output_{momentum}GeV_{i+1}.root" + ) + # Add the Geant4 simulation as a process, passing in command line arguments as extra kwargs + # Also set the completion_trigger to our trigger, so that it is set once the sim is complete + run.add_process( + identifier=f"Geant4_simulation_{momentum}GeV_{i}", + executable=g4_binary, + momentum=momentum, + batch=True, + output=_output_file_path, + completion_trigger=_trigger, + **kwargs, + ) + # Wait until simulation completes + _trigger.wait() + + # Upload the parsed results from the ROOT file as metrics, and upload the ROOT file as an output + run.log_metrics(root_file_parser(str(_output_file_path))) + run.save_file(_output_file_path, category="output") + + # Delete any results files since these are now uploaded to Simvue! 
+ for file in pathlib.Path().cwd().glob("Geant4_simvue_*"): + file.unlink() if __name__ in "__main__": geant4_simvue_example() diff --git a/examples/GeometryOptimisation/bluemira_simvue_geometry_optimisation.py b/examples/GeometryOptimisation/bluemira_simvue_geometry_optimisation.py deleted file mode 100644 index 28c15dae..00000000 --- a/examples/GeometryOptimisation/bluemira_simvue_geometry_optimisation.py +++ /dev/null @@ -1,179 +0,0 @@ -# bluemira is an integrated inter-disciplinary design tool for future fusion -# reactors. It incorporates several modules, some of which rely on other -# codes, to carry out a range of typical conceptual fusion reactor design -# activities. -# -# Copyright (C) 2021 M. Coleman, J. Cook, F. Franza, I.A. Maione, S. McIntosh, J. Morris, -# D. Short -# -# bluemira is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# bluemira is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with bluemira; if not, see . -""" -A quick tutorial on the optimisation of geometry in bluemira -""" - -import os -import logging - -from bluemira.geometry.optimisation import GeometryOptimisationProblem -from bluemira.geometry.parameterisations import PrincetonD -from bluemira.utilities.opt_problems import OptimisationObjective -from bluemira.utilities.optimiser import Optimiser, approx_derivative - -from simvue import Handler, Run - -# Let's set up a simple GeometryOptimisationProblem, where we minimise the length of -# parameterised geometry. 
- -# First, we set up the GeometryParameterisation, with some bounds on the variables. -x1_lower = 2 -x1_value = 2.05 -x1_upper = 6 -x2_lower = 80 -x2_value = 198.5 -x2_upper = 260 -dz_lower = -0.5 -dz_upper = 0.5 -max_eval = 500 -ftol_abs = 1e-12 -ftol_rel = 1e-12 - -run = Run() - -logger = logging.getLogger(__name__) -logger.setLevel(logging.DEBUG) -sth = Handler(run) -logger.addHandler(sth) - -run.init( - metadata={ - "dataset.x1_lower": x1_lower, - "dataset.x1_upper": x1_upper, - "dataset.x1_value": x1_value, - "dataset.x2_lower": x2_lower, - "dataset.x2_upper": x2_upper, - "dataset.x2_value": x2_value, - "dataset.dz_lower": dz_lower, - "dataset.dz_upper": dz_upper, - "optimiser.max_eval": max_eval, - "optimiser.ftol_abs": ftol_abs, - "optimiser.ftol_rel": ftol_rel, - }, - folder="/simvue_client_demos", - visibility="tenant" if os.environ.get("CI") else None, - tags=["bluemira", "simvue_client_examples"], - description="A simple GeometryOptimisationProblem, where we minimise the length of parameterised geometry using gradient-based optimisation algorithm.", -) - -logger.info("Initialised run") -logger.info("Create parameterisation") -parameterisation_1 = PrincetonD( - { - "x1": {"lower_bound": x1_lower, "value": x1_value, "upper_bound": x1_upper}, - "x2": {"lower_bound": x2_lower, "value": x2_value, "upper_bound": x2_upper}, - "dz": {"lower_bound": dz_lower, "value": 0, "upper_bound": dz_upper}, - } -) - -# Here we're minimising the length, and we can work out that the dz variable will not -# affect the optimisation, so let's just fix at some value and remove it from the problem -parameterisation_1.fix_variable("dz", value=0) - -# Now, we set up our optimiser. 
We'll start with a gradient-based optimisation algorithm -logger.info("Define Optimiser") - -slsqp_optimiser = Optimiser( - "SLSQP", - opt_conditions={"max_eval": max_eval, "ftol_abs": ftol_abs, "ftol_rel": ftol_rel}, -) - - -# Define the call back function -def calculate_length(vector, parameterisation): - """ - Calculate the length of the parameterised shape for a given state vector. - """ - - parameterisation.variables.set_values_from_norm(vector) - print("logging metrics", float(parameterisation.variables["x1"].value)) - run.log_metrics( - { - "x1_value": float(parameterisation.variables["x1"].value), - "x1_lower": float(parameterisation.variables["x1"].lower_bound), - "x1_upper": float(parameterisation.variables["x1"].upper_bound), - } - ) - run.log_metrics( - { - "x2_value": float(parameterisation.variables["x2"].value), - "x2_lower": float(parameterisation.variables["x2"].lower_bound), - "x2_upper": float(parameterisation.variables["x2"].upper_bound), - } - ) - - return parameterisation.create_shape().length - - -def my_minimise_length(vector, grad, parameterisation, ad_args=None): - """ - Objective function for nlopt optimisation (minimisation) of length. - - Parameters - ---------- - vector: np.ndarray - State vector of the array of coil currents. - grad: np.ndarray - Local gradient of objective function used by LD NLOPT algorithms. - Updated in-place. - ad_args: Dict - Additional arguments to pass to the `approx_derivative` function. - - Returns - ------- - fom: Value of objective function (figure of merit). 
- """ - ad_args = ad_args if ad_args is not None else {} - print(vector) - length = calculate_length(vector, parameterisation) - if grad.size > 0: - grad[:] = approx_derivative( - calculate_length, vector, f0=length, args=(parameterisation,), **ad_args - ) - run.update_metadata( - { - "x1_value": float(parameterisation.variables["x1"].value), - "x2_value": float(parameterisation.variables["x2"].value), - } - ) - return length - - -# Next, we make our objective function, using in this case one of the ready-made ones. -# NOTE: This `minimise_length` function includes automatic numerical calculation of the -# objective function gradient, and expects a certain signature. -objective = OptimisationObjective( - my_minimise_length, - f_objective_args={"parameterisation": parameterisation_1}, -) - - -# Finally, we initialise our `GeometryOptimisationProblem` and run it. -logger.info("Call optimiser") -my_problem = GeometryOptimisationProblem(parameterisation_1, slsqp_optimiser, objective) -my_problem.optimise() - - -# Here we're minimising the length, within the bounds of our PrincetonD parameterisation, -# so we'd expect that x1 goes to its upper bound, and x2 goes to its lower bound. -run.save_file("bluemira_simvue_geometry_optimisation.py", "code") -run.close() diff --git a/examples/GeometryOptimisation/readme.md b/examples/GeometryOptimisation/readme.md deleted file mode 100644 index 542a6433..00000000 --- a/examples/GeometryOptimisation/readme.md +++ /dev/null @@ -1,7 +0,0 @@ -# Geometry optimisation using Bluemira - -Bluemira is an integrated inter-disciplinary design tool for future fusion reactors. It incorporates several modules, some of which rely on other codes, -to carry out a range of typical conceptual fusion reactor design activities. 
(https://github.com/Fusion-Power-Plant-Framework/bluemira) - - -For details of installation of Bluemira please refer to https://bluemira.readthedocs.io/en/develop/installation.html diff --git a/examples/Logging/README.md b/examples/Logging/README.md index b921bbe5..38ac713a 100644 --- a/examples/Logging/README.md +++ b/examples/Logging/README.md @@ -1 +1,24 @@ # Logging +You can use Simvue as a logging handler, so that you can easily upload logging messages to the Events log of a Simvue run. + +To run this example, move into this directory: +``` +cd examples/Logging +``` +Setup a virtual environment: +``` +python3 -m venv venv +source ./venv/bin/activate +``` +Install the required dependencies: +``` +pip install simvue +``` +Create a `simvue.toml` file by going to the web UI, clicking 'Create New Run', and copying the details given into the file, eg using: +``` +nano simvue.toml +``` +Run the code: +``` +python3 logging-to-simvue.py +``` \ No newline at end of file diff --git a/examples/Logging/logging-to-simvue.py b/examples/Logging/logging-to-simvue.py index ff6e851c..886ecc0d 100644 --- a/examples/Logging/logging-to-simvue.py +++ b/examples/Logging/logging-to-simvue.py @@ -22,3 +22,6 @@ def simvue_logger_demo(ci: bool) -> None: logger.addHandler(sth) logger.info("This is a Simvue logging test") + +if __name__ == "__main__": + simvue_logger_demo() \ No newline at end of file diff --git a/examples/OpenFOAM/Dockerfile b/examples/OpenFOAM/Dockerfile deleted file mode 100644 index 3f827e82..00000000 --- a/examples/OpenFOAM/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM openfoam/openfoam10-paraview56 -ARG simvue_file -USER root -RUN add-apt-repository ppa:deadsnakes/ppa -y -RUN apt-get update && \ - apt-get install -y python3.11-full -RUN useradd simvue -m -COPY . 
/simvue_client -RUN python3.11 -m ensurepip --upgrade -RUN python3.11 -m pip install /simvue_client -RUN python3.11 -m pip install -r /simvue_client/examples/OpenFOAM/requirements.txt diff --git a/examples/OpenFOAM/README.md b/examples/OpenFOAM/README.md deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/OpenFOAM/requirements.txt b/examples/OpenFOAM/requirements.txt deleted file mode 100644 index 18e44044..00000000 --- a/examples/OpenFOAM/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -ukaea-multiparser>=1.0.1 diff --git a/examples/OpenFOAM/simvue_openfoam.py b/examples/OpenFOAM/simvue_openfoam.py deleted file mode 100644 index c092cba6..00000000 --- a/examples/OpenFOAM/simvue_openfoam.py +++ /dev/null @@ -1,101 +0,0 @@ -""" -OpenFOAM v10 Simvue Example - -This Simvue example launches the MovingCone example within the OpenFoam10 tutorials. - -The contents of the log.PimpleFoam file are parsed using multiparser. - -To run this example within an OpenFOAM 10 Docker container -ensure you have either a Simvue config file or you have -set the values for SIMVUE_TOKEN and SIMVUE_URL and run: - -python simvue_openfoam.py /opt/openfoam10/tutorials/incompressible/pimpleFoam/laminar/movingCone/Allrun - -""" - -import os -import re -import click -import uuid -import simvue -import multiprocessing -import multiparser -import multiparser.parsing.tail as mp_tail_parse - -from typing import Any - - -@click.command -@click.argument("all_run_script", type=click.Path(exists=True)) -@click.option("--ci", is_flag=True, default=False) -def open_foam_simvue_demo(all_run_script: str, ci: bool) -> None: - """Run the Allrun file for the given simulation and parse the log.PimpleFoam content - - Parameters - ---------- - all_run_script : str - path of the Allrun execution script - """ - # Regular expressions - - uniq_id: str = f"{uuid.uuid4()}".split("-")[0] - - @mp_tail_parse.log_parser - def custom_parser(file_content: str, **__) -> tuple[dict[str, Any], dict[str, Any]]: - 
exp1: re.Pattern[str] = re.compile( - "^(.+): Solving for (.+), Initial residual = (.+), Final residual = (.+), No Iterations (.+)$" - ) - exp2: re.Pattern[str] = re.compile("^ExecutionTime = ([0-9.]+) s") - metrics = {} - - for line in file_content.splitlines(): - # Get time - match = exp2.match(line) - if match: - ttime = match.group(1) - if metrics: - run.log_metrics(metrics, time=ttime) - metrics = {} - - # Get metrics - match = exp1.match(line) - if match: - metrics["residuals.initial.%s" % match.group(2)] = match.group(3) - metrics["residuals.final.%s" % match.group(2)] = match.group(4) - return {}, metrics - - log_location: str = os.path.dirname(all_run_script) - termination_trigger = multiprocessing.Event() - - with simvue.Run() as run: - run.init( - f"open_foam_demo_{uniq_id}", - folder="/simvue_client_demos", - tags=["OpenFOAM", "simvue_client_examples"], - retention_period="1 hour" if ci else None, - visibility="tenant" if ci else None, - ) - run.add_process( - identifier="OpenFOAM", - executable="/bin/sh", - script=all_run_script, - completion_callback=lambda *_, **__: termination_trigger.set(), - ) - with multiparser.FileMonitor( - per_thread_callback=lambda metrics, *_: run.log_metrics(metrics), - exception_callback=run.log_event, - terminate_all_on_fail=True, - plain_logging=True, - flatten_data=True, - interval=0.1, - termination_trigger=termination_trigger, - ) as monitor: - monitor.tail( - parser_func=custom_parser, - path_glob_exprs=[os.path.join(log_location, "log.pimpleFoam")], - ) - monitor.run() - - -if __name__ in "__main__": - open_foam_simvue_demo() diff --git a/examples/Optuna/PyTorch/README.md b/examples/Optuna/PyTorch/README.md deleted file mode 100644 index d3cbc80d..00000000 --- a/examples/Optuna/PyTorch/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# PyTorch - -This example is based on the Medium post https://medium.com/optuna/optuna-meets-weights-and-biases-58fc6bab893. 
- -> We optimize the validation accuracy of hand-written -> digit recognition using -> PyTorch and FashionMNIST. We optimize the neural network architecture as well as the optimizer -> configuration. As it is too time consuming to use the whole FashionMNIST dataset, -> we here use a small subset of it. - -Setup a virtual environment: -``` -python3 -m venv venv -source ./venv/bin/activate -``` -Install the required dependencies: -``` -pip install -r requirements.txt -``` -Run the code: -``` -python3 simvue_optuna_pytorch.py -``` -By default this will create a number runs in Simvue, all in a folder with name `/optuna/tests/-`, where `` is a random -adjective and `` is a random noun. Metadata and a metric `validation accuracy` is collected during each run. The metadata includes -an attribute `state` which indicates if the run completed successfully or was pruned. diff --git a/examples/Optuna/README.md b/examples/Optuna/README.md index 7169b646..ca2c0de4 100644 --- a/examples/Optuna/README.md +++ b/examples/Optuna/README.md @@ -1,3 +1,34 @@ -# Optuna +# PyTorch -Examples using the Optuna hyperparameter optimization framework. +This example is based on the Medium post https://medium.com/optuna/optuna-meets-weights-and-biases-58fc6bab893. + +> We optimize the validation accuracy of hand-written +> digit recognition using +> PyTorch and FashionMNIST. We optimize the neural network architecture as well as the optimizer +> configuration. As it is too time consuming to use the whole FashionMNIST dataset, +> we here use a small subset of it. 
+
+To run this example, move into this directory:
+```
+cd examples/Optuna
+```
+Setup a virtual environment:
+```
+python3 -m venv venv
+source ./venv/bin/activate
+```
+Install the required dependencies:
+```
+pip install -r requirements.txt
+```
+Create a `simvue.toml` file by going to the web UI, clicking 'Create New Run', and copying the details given into the file, eg using:
+```
+nano simvue.toml
+```
+Run the code:
+```
+python3 simvue_optuna_pytorch.py
+```
+By default this will create a number of runs in Simvue, all in a folder with name `/optuna/tests/<adjective>-<noun>`, where `<adjective>` is a random
+adjective and `<noun>` is a random noun. Metadata and a metric `validation accuracy` is collected during each run. The metadata includes
+an attribute `state` which indicates if the run completed successfully or was pruned.
diff --git a/examples/Optuna/PyTorch/requirements.txt b/examples/Optuna/requirements.txt
similarity index 100%
rename from examples/Optuna/PyTorch/requirements.txt
rename to examples/Optuna/requirements.txt
diff --git a/examples/Optuna/PyTorch/simvue_optuna_pytorch.py b/examples/Optuna/simvue_optuna_pytorch.py
similarity index 97%
rename from examples/Optuna/PyTorch/simvue_optuna_pytorch.py
rename to examples/Optuna/simvue_optuna_pytorch.py
index ceba3a7e..c0905061 100644
--- a/examples/Optuna/PyTorch/simvue_optuna_pytorch.py
+++ b/examples/Optuna/simvue_optuna_pytorch.py
@@ -29,7 +29,7 @@
 @click.option("--batch-size", type=int, default=BATCHSIZE, show_default=True)
 @click.option("--train-examples", type=int, default=BATCHSIZE * 30, show_default=True)
 @click.option("--valid-examples", type=int, default=BATCHSIZE * 10, show_default=True)
-@click.option("--trials", type=int, default=100, show_default=True)
+@click.option("--trials", type=int, default=5, show_default=True)
 @click.option("--timeout", type=int, default=600, show_default=True)
 @click.option("--ci", is_flag=True, default=False)
 def run_optuna_example(
@@ -135,6 +135,7 @@ def objective(trial):
     with Run() as run:
run.init( + name=f"simvue_optuna_example_trial_{trial.number}", folder="/optuna/tests/%s" % FOLDER_NAME, metadata=config, tags=["pytorch", "simvue_client_examples"], diff --git a/examples/PyTorch/README.md b/examples/PyTorch/README.md new file mode 100644 index 00000000..2c9abe28 --- /dev/null +++ b/examples/PyTorch/README.md @@ -0,0 +1,25 @@ +# PyTorch + +This is an example of using Simvue to track and monitor the training of a Machine Learning model using PyTorch. + +To run this example, move into this directory: +``` +cd examples/PyTorch +``` +Setup a virtual environment: +``` +python3 -m venv venv +source ./venv/bin/activate +``` +Install the required dependencies: +``` +pip install -r requirements.txt +``` +Create a `simvue.toml` file by going to the web UI, clicking 'Create New Run', and copying the details given into the file, eg using: +``` +nano simvue.toml +``` +Run the code: +``` +python3 main.py +``` diff --git a/examples/PyTorch/main.py b/examples/PyTorch/main.py index bc3a78ed..c17a321a 100644 --- a/examples/PyTorch/main.py +++ b/examples/PyTorch/main.py @@ -1,3 +1,19 @@ +"""PyTorch Example +=================== + +This is an example of how to track PyTorch ML model training with Simvue. + +To run this example, do: + + pip install -r examples/PyTorch/requirements.txt + python examples/PyTorch/main.py + +You can optionally specify command line arguments to change the batch or +epoch size, training hardware details, learning rate etc. 
To see possible options: + + python examples/PyTorch/main.py --help +""" + # Taken from https://github.com/pytorch/examples/blob/main/mnist/main.py from __future__ import print_function @@ -121,7 +137,7 @@ def test(model, device, test_loader, epoch, run): @click.option( "--epochs", type=int, - default=14, + default=5, help="number of epochs to train", show_default=True, ) @@ -225,6 +241,7 @@ def simvue_pytorch_example( with Run() as run: run.init( + name="PyTorch_Simvue_Example", tags=["PyTorch", "simvue_client_examples"], folder="/simvue_client_demos", retention_period="1 hour" if ci else None, diff --git a/examples/README.md b/examples/README.md index 4fd4f545..53965356 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,17 +1,25 @@ # Examples -Here we provide a number of different examples using different applications. +Here we provide a number of different examples using different applications. See the `README` files in each directory for instructions on how to run each example. -## Tensorflow -[Basic example](Tensorflow/) +For more examples using our provided Integrations, including for MOOSE, OpenFoam, FDS, and TensorFlow, [check out the examples in the Integrations repository.](https://github.com/simvue-io/integrations) -## Optuna -[PyTorch](Optuna/PyTorch/) +## Bluemira -## OpenFOAM +An example using Simvue to track simulations from [Bluemira](https://bluemira.readthedocs.io/en/develop/introduction.html) - a Python based framework for the design of nuclear fusion reactors. -## SU2 -[External inviscid flow around a 3D geometry](SU2/) +## Geant4 + +An example using Simvue to track simulations from [Geant4](https://geant4.web.cern.ch/) - a toolkit for the simulation of the passage of particles through matter. ## Logging -[Example](Logging/) +An example of using Simvue as a logging handler to upload messages from Python loggers to the Events log. 
+ +## Optuna +An example of using Simvue to track the optimisation of hyperparameters in ML models using [Optuna](https://optuna.org/) + +## PyTorch +An example of using Simvue to track the training and validation of Machine Learning models using (PyTorch)[https://pytorch.org/] + +## SU2 +An example of using Simvue to track a simulation in [SU2](https://su2code.github.io/) - a Multiphysics simulation and design software \ No newline at end of file diff --git a/examples/SU2/README.md b/examples/SU2/README.md index ae56cc81..99df6228 100644 --- a/examples/SU2/README.md +++ b/examples/SU2/README.md @@ -2,6 +2,11 @@ [SU2](https://su2code.github.io/) is open-source multi-physics and simulation design software. This Simvue example is taken from one of the tutorials: https://su2code.github.io/tutorials/Inviscid_ONERAM6/. + +To run this example, move into this directory: +``` +cd examples/SU2 +``` Setup a Python virtual environment: ``` python3 -m venv venv @@ -11,6 +16,10 @@ Install Simvue: ``` pip install simvue ``` +Create a `simvue.toml` file by going to the web UI, clicking 'Create New Run', and copying the details given into: +``` +nano simvue.toml +``` Download and install the appropriate version of SU2, e.g. 
on Linux: ``` wget https://github.com/su2code/SU2/releases/download/v7.0.2/SU2-v7.0.2-linux64-mpi.zip diff --git a/examples/SU2/SU2.py b/examples/SU2/SU2.py index 16eff62d..60c105c3 100644 --- a/examples/SU2/SU2.py +++ b/examples/SU2/SU2.py @@ -1,15 +1,91 @@ import os -import multiprocessing import click -import multiparser import requests +from typing import Any + import multiparser.parsing.tail as mp_tail_parse import multiparser.parsing.file as mp_file_parse -from typing import Any +from simvue_integrations.connectors.generic import WrappedRun -import simvue +# # Create a custom class which inherits from WrappedRun: +class SU2Run(WrappedRun): + # Store these output files + output_files: list[str] = ["flow.vtk", "surface_flow.vtk", "restart_flow.dat"] + + # Collect these metadata attributes from the config file + metadata_attrs: list[str] = [ + "SOLVER", + "MATH_PROBLEM", + "MACH_NUMBER", + "AOA", + "SIDESLIP_ANGLE", + "FREESTREAM_PRESSURE", + "FREESTREAM_TEMPERATURE", + ] + + @mp_file_parse.file_parser + def metadata_parser(self, input_file: str, **_) -> tuple[dict[str, Any], dict[str, Any]]: + metadata = {"SU2": {}} + with open(input_file) as in_csv: + file_content = in_csv.read() + + for line in file_content.splitlines(): + for attr in self.metadata_attrs: + if line.startswith(attr): + metadata["SU2"][attr.lower()] = line.split("%s= " % attr)[1].strip() + return {}, metadata + + def _pre_simulation(self): + super()._pre_simulation() + + environment: dict[str, str] = os.environ.copy() + environment["PATH"] = ( + f"{os.path.abspath(self.su2_binary_directory)}:{os.environ['PATH']}" + ) + environment["PYTHONPATH"] = ( + f"{os.path.abspath(self.su2_binary_directory)}{f':{pypath}' if (pypath := os.environ.get('PYTHONPATH')) else ''}" + ) + + self.add_process( + identifier="SU2_simulation", + executable="SU2_CFD", + script=self.config_filename, + env=environment, + completion_trigger=self._trigger, + ) + + def _during_simulation(self): + self.file_monitor.track( + 
path_glob_exprs=self.config_filename, + parser_func=self.metadata_parser, + callback=lambda meta, *_: self.update_metadata(meta), + static=True, + ) + self.file_monitor.tail( + path_glob_exprs=["history.csv"], + parser_func=mp_tail_parse.record_csv, + callback=lambda metrics, *_: self.log_metrics( + { + key.replace("[", "_").replace("]", ""): value + for key, value in metrics.items() + } + + )) + + def _post_simulation(self): + for file in self.output_files: + if os.path.exists(file): + self.save_file(file, "output") + + super()._post_simulation() + + + def launch(self, su2_binary_directory: str, config_filename: str): + self.su2_binary_directory = su2_binary_directory + self.config_filename = config_filename + super().launch() @click.command @@ -20,9 +96,6 @@ def run_su2_example( su2_binary_directory: str, config: str | None, mesh: str | None, ci: bool ) -> None: - # Name of history file to collect metrics from - HISTORY: str = "history.csv" - config_url = ( config or "https://raw.githubusercontent.com/su2code/Tutorials/master/compressible_flow/Inviscid_ONERAM6/inv_ONERAM6.cfg" @@ -50,47 +123,17 @@ def run_su2_example( with open(file_name, "wb") as out_f: out_f.write(req_response.content) - # Store these output files - OUTPUT_FILES: list[str] = ["flow.vtk", "surface_flow.vtk", "restart_flow.dat"] - - for file_name in OUTPUT_FILES + [HISTORY]: - if os.path.exists(file_name): - os.remove(file_name) - - # Collect these metadata attributes from the config file - METADATA_ATTRS: list[str] = [ - "SOLVER", - "MATH_PROBLEM", - "MACH_NUMBER", - "AOA", - "SIDESLIP_ANGLE", - "FREESTREAM_PRESSURE", - "FREESTREAM_TEMPERATURE", - ] - - @mp_file_parse.file_parser - def metadata_parser(file_name: str, **_) -> tuple[dict[str, Any], dict[str, Any]]: - metadata = {} - with open(file_name) as in_csv: - file_content = in_csv.read() - - for line in file_content.splitlines(): - for attr in METADATA_ATTRS: - if line.startswith(attr): - metadata[attr] = line.split("%s= " % 
attr)[1].strip() - return {}, metadata - - termination_trigger = multiprocessing.Event() - - environment: dict[str, str] = os.environ.copy() - environment["PATH"] = ( - f"{os.path.abspath(su2_binary_directory)}:{os.environ['PATH']}" - ) - environment["PYTHONPATH"] = ( - f"{os.path.abspath(su2_binary_directory)}{f':{pypath}' if (pypath := os.environ.get('PYTHONPATH')) else ''}" - ) - - with simvue.Run() as run: + # Use your custom class as a context manager in the same way you'd use a Simvue Run + with SU2Run() as run: + + # Delete any previous results files + for file_name in run.output_files + ["history.csv"]: + if os.path.exists(file_name): + os.remove(file_name) + + # Since WrappedRun inherits from Simvue Run, you have access to all normal methods + + # Start by initialising the run run.init( "SU2_simvue_demo", folder="/simvue_client_demos", @@ -104,45 +147,9 @@ def metadata_parser(file_name: str, **_) -> tuple[dict[str, Any], dict[str, Any] retention_period="1 hour" if ci else None, visibility="tenant" if ci else None, ) - run.add_process( - identifier="SU2_simulation", - executable="SU2_CFD", - script=config_filename, - env=environment, - completion_callback=lambda *_, **__: termination_trigger.set(), - ) - with multiparser.FileMonitor( - # Metrics cannot have square brackets in their names so we remove - # these before passing them to log_metrics - per_thread_callback=lambda metrics, *_: run.log_metrics( - { - key.replace("[", "_").replace("]", ""): value - for key, value in metrics.items() - } - ), - exception_callback=run.log_event, - terminate_all_on_fail=True, - plain_logging=True, - flatten_data=True, - termination_trigger=termination_trigger, - ) as monitor: - monitor.track( - path_glob_exprs=[config_filename], - parser_func=metadata_parser, - callback=lambda meta, *_: run.update_metadata(meta), - static=True, - ) - monitor.tail( - path_glob_exprs=[HISTORY], - parser_func=mp_tail_parse.record_csv, - ) - monitor.track( - path_glob_exprs=OUTPUT_FILES, - 
callback=lambda *_, meta: run.save_file(meta["file_name"], "output"), - parser_func=lambda *_, **__: ({}, {}), - ) - monitor.run() - + + # Then run your custom 'launch' method, which will run each of the internal methods you created + run.launch(su2_binary_directory, config_filename) if __name__ == "__main__": run_su2_example() diff --git a/examples/SU2/SU2.sh b/examples/SU2/SU2.sh index ad2eee1f..3d87d7df 100644 --- a/examples/SU2/SU2.sh +++ b/examples/SU2/SU2.sh @@ -1,13 +1,9 @@ #!/bin/bash # Makes use of the SU2 tutorial: https://su2code.github.io/tutorials/Inviscid_ONERAM6/ -export SU2_RUN= +export SU2_RUN=/home/wk9874/Documents/simvue/python-api/examples/SU2/bin export PATH=$SU2_RUN:$PATH export PYTHONPATH=$SU2_RUN:$PYTHONPATH -# Execute SU2 & write PID to file -SU2_CFD inv_ONERAM6.cfg & -echo $! >/tmp/pid.file - # Execute Simvue monitor -python3 SU2.py /tmp/pid.file +python3 SU2.py inv_ONERAM6.cfg diff --git a/examples/Tensorflow/README.md b/examples/Tensorflow/README.md deleted file mode 100644 index dd09c3bc..00000000 --- a/examples/Tensorflow/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# Tensorflow - -Setup a virtual environment: -``` -python3 -m venv venv -source ./venv/bin/activate -``` -Install the required dependencies: -``` -pip install -r requirements.txt -``` -Run the code: -``` -python3 dynamic_rnn.py -``` diff --git a/examples/Tensorflow/dynamic_rnn.py b/examples/Tensorflow/dynamic_rnn.py deleted file mode 100644 index 1c89e1af..00000000 --- a/examples/Tensorflow/dynamic_rnn.py +++ /dev/null @@ -1,218 +0,0 @@ -from __future__ import absolute_import, division, print_function - -import random -import click - -import numpy as np - -# Import TensorFlow v2. 
-import tensorflow as tf -from tensorflow.keras import Model, layers - -from simvue import Run - -# Taken from https://github.com/aymericdamien/TensorFlow-Examples/ - - -@click.command -@click.option( - "--classes", type=int, default=2, help="linear sequence or not", show_default=True -) -@click.option( - "--lr", type=float, default=0.001, help="learning rate", show_default=True -) -@click.option("--training-steps", type=int, default=2000, show_default=True) -@click.option("--batch-size", type=int, default=64, show_default=True) -@click.option( - "--num-units", - type=int, - default=32, - help="number of neurons for the LSTM layer", - show_default=True, -) -@click.option("--ci", is_flag=True) -def run_tensorflow_example( - classes: int, - lr: float, - training_steps: int, - batch_size: int, - ci: bool, - num_units: int, -) -> None: - # Dataset parameters. - num_classes = classes # linear sequence or not. - seq_max_len = 20 # Maximum sequence length. - seq_min_len = 5 # Minimum sequence length (before padding). - masking_val = -1 # -1 will represents the mask and be used to pad sequences to a common max length. - max_value = 10000 # Maximum int value. - - if ci: - batch_size = 1 - training_steps = 1 - - with Run() as run: - run.init( - "tensorflow_dynamic_rnn", - metadata={ - "dataset.num_classes": num_classes, - "dataset.seq_max_len": seq_max_len, - "dataset.seq_min_len": seq_min_len, - "dataset.masking_val": masking_val, - "training.learning_rate": lr, - "training.training_steps": training_steps, - "training.batch_size": batch_size, - "network.num_units": num_units, - }, - description="TensorFlow 2.0 implementation of a Recurrent Neural Network (LSTM) that performs dynamic " - "computation over sequences with variable length. This example is using a toy dataset to " - "classify linear sequences. 
The generated sequences have variable length.", - retention_period="1 hour" if ci else None, - tags=["tensorflow", "simvue_client_examples"], - folder="/simvue_client_demos", - visibility="tenant" if ci else None, - ) - run.save_file(__file__, "code") - - # ==================== - # TOY DATA GENERATOR - # ==================== - - def toy_sequence_data(): - """Generate sequence of data with dynamic length. - This function generates toy samples for training: - - Class 0: linear sequences (i.e. [1, 2, 3, 4, ...]) - - Class 1: random sequences (i.e. [9, 3, 10, 7,...]) - - NOTICE: - We have to pad each sequence to reach 'seq_max_len' for TensorFlow - consistency (we cannot feed a numpy array with inconsistent - dimensions). The dynamic calculation will then be perform and ignore - the masked value (here -1). - """ - while True: - # Set variable sequence length. - seq_len = random.randint(seq_min_len, seq_max_len) - rand_start = random.randint(0, max_value - seq_len) - # Add a random or linear int sequence (50% prob). - if random.random() < 0.5: - # Generate a linear sequence. - seq = np.arange(start=rand_start, stop=rand_start + seq_len) - # Rescale values to [0., 1.]. - seq = seq / max_value - # Pad sequence until the maximum length for dimension consistency. - # Masking value: -1. - seq = np.pad( - seq, - mode="constant", - pad_width=(0, seq_max_len - seq_len), - constant_values=masking_val, - ) - label = 0 - else: - # Generate a random sequence. - seq = np.random.randint(max_value, size=seq_len) - # Rescale values to [0., 1.]. - seq = seq / max_value - # Pad sequence until the maximum length for dimension consistency. - # Masking value: -1. - seq = np.pad( - seq, - mode="constant", - pad_width=(0, seq_max_len - seq_len), - constant_values=masking_val, - ) - label = 1 - yield np.array(seq, dtype=np.float32), np.array(label, dtype=np.float32) - - # Use tf.data API to shuffle and batch data. 
- train_data = tf.data.Dataset.from_generator( - toy_sequence_data, output_types=(tf.float32, tf.float32) - ) - train_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1) - - # Create LSTM Model. - class LSTM(Model): - # Set layers. - def __init__(self): - super(LSTM, self).__init__() - # Define a Masking Layer with -1 as mask. - self.masking = layers.Masking(mask_value=masking_val) - # Define a LSTM layer to be applied over the Masking layer. - # Dynamic computation will automatically be performed to ignore -1 values. - self.lstm = layers.LSTM(units=num_units) - # Output fully connected layer (2 classes: linear or random seq). - self.out = layers.Dense(num_classes) - - # Set forward pass. - def call(self, x, is_training=False): - # A RNN Layer expects a 3-dim input (batch_size, seq_len, num_features). - x = tf.reshape(x, shape=[-1, seq_max_len, 1]) - # Apply Masking layer. - x = self.masking(x) - # Apply LSTM layer. - x = self.lstm(x) - # Apply output layer. - x = self.out(x) - if not is_training: - # tf cross entropy expect logits without softmax, so only - # apply softmax when not training. - x = tf.nn.softmax(x) - return x - - # Build LSTM model. - lstm_net = LSTM() - - # Cross-Entropy Loss. - # Note that this will apply 'softmax' to the logits. - def cross_entropy_loss(x, y): - # Convert labels to int 64 for tf cross-entropy function. - y = tf.cast(y, tf.int64) - # Apply softmax to logits and compute cross-entropy. - loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=x) - # Average loss across the batch. - return tf.reduce_mean(loss) - - # Accuracy metric. - def accuracy(y_pred, y_true): - # Predicted class is the index of highest score in prediction vector (i.e. argmax). - correct_prediction = tf.equal( - tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64) - ) - return tf.reduce_mean(tf.cast(correct_prediction, tf.float32), axis=-1) - - # Adam optimizer. - optimizer = tf.optimizers.Adam(lr) - - # Optimization process. 
- def run_optimization(x, y): - # Wrap computation inside a GradientTape for automatic differentiation. - with tf.GradientTape() as g: - # Forward pass. - pred = lstm_net(x, is_training=True) - # Compute loss. - loss = cross_entropy_loss(pred, y) - - # Variables to update, i.e. trainable variables. - trainable_variables = lstm_net.trainable_variables - - # Compute gradients. - gradients = g.gradient(loss, trainable_variables) - - # Update weights following gradients. - optimizer.apply_gradients(zip(gradients, trainable_variables)) - - # Run training for the given number of steps. - for batch_x, batch_y in train_data.take(training_steps): - # Run the optimization to update W and b values. - run_optimization(batch_x, batch_y) - - pred = lstm_net(batch_x, is_training=True) - loss = cross_entropy_loss(pred, batch_y) - acc = accuracy(pred, batch_y) - run.log_metrics({"loss": float(loss), "accuracy": float(acc)}) - - run.update_metadata({"loss": float(loss), "accuracy": float(acc)}) - - -if __name__ in "__main__": - run_tensorflow_example() diff --git a/examples/Tensorflow/requirements.txt b/examples/Tensorflow/requirements.txt deleted file mode 100644 index 6c1c3968..00000000 --- a/examples/Tensorflow/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -tensorflow -simvue diff --git a/notebooks/README.md b/notebooks/README.md index 97699ff3..9a152e15 100644 --- a/notebooks/README.md +++ b/notebooks/README.md @@ -1,13 +1,36 @@ -# Colab notebooks +# Example Notebooks +These example notebooks give you some simple examples of integrating Simvue into your workflow to track and monitor any simulation or data processing task. -## Using Simvue in Google Colab +## Basic Example -This demonstrates how to use Simvue in Google Colab. 
+[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1VleQ-Ga010w9TE2oTBnTnJdGHlWZMJKn?usp=sharing) -## Simple Tensorflow example +In this example we take a simple piece of Python code which finds the average of a set of random numbers, and use Simvue to: -[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1HDN0gUKC9pGtroOQhylrH_eM6BnIvcRC?usp=sharing) +* Start a new run to track the progress of the code +* Upload metrics in real time to the server +* Upload events to tell us when the code is complete -In this example we take an existing Python code and then make use of Simvue to: -* collect metadata, -* collect metrics which can be visualised in real time in the Simvue dashboard. +## Detailed Example + +[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1GHItQvWS9HBUoTmdZxDYnGq0wfmdYhsc?usp=sharing) + +In this more detailed example, we create a simple simulation of customers arriving at a bank counter and either being served or running out of patience and leaving. 
We then use Simvue to: + +* Start a new run to track the progress of this simulation +* Upload artifacts for storage in the form of a file and a Numpy array +* Add metadata to keep track of input parameters for the simulation +* Upload metrics in real time to the server to keep track of the average customer wait time and percentage who don't get served +* Add events which show us the status of each customer +* Add alerts which notify us if too many customers are not being served in time + +## Non-Python Example + +[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1fDlJ6xeRmHfDsdqN5ATJ8lTj4lqavTqd?usp=sharing) + +In this example, we demonstrate how Simvue can be used to track simulations (or other computational tasks) which are not Python-based by tracking the output and/or log files which they create during execution. We use Simvue to: +* Create a class which wraps the Run class, adding functionality for tracking output files in real time using Multiparser +* Use this class to start a new run to track the progress of this simulation +* Use `add_process` to have Simvue start and monitor a non-Python simulation +* Upload metrics in real time to the server to keep track of the temperature of a sample being heated and cooled +* Upload the script and output files as artifacts for storage on the Simvue server \ No newline at end of file diff --git a/notebooks/basic_example.ipynb b/notebooks/basic_example.ipynb new file mode 100644 index 00000000..fe039820 --- /dev/null +++ b/notebooks/basic_example.ipynb @@ -0,0 +1,179 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "view-in-github" + }, + "source": [ + "\"Open\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "-pVhOfzLx9us" + }, + "source": [ + "

\n", + " \n", + " \n", + " \n", + " \"Simvue\"\n", + " \n", + "

\n", + "\n", + "# Basic Example\n", + "This is a basic example of some of the functionality of Simvue. For a more detailed example which introduces more features, [see the tutorial here](https://docs.simvue.io/tutorial_basic/introduction/).\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Install dependencies\n", + "Install any dependencies if you have not already done so:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip install simvue numpy" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "wKJ4bd5rt1wy" + }, + "source": [ + "\n", + "### Initialisation\n", + "To proceed you need to specify the URL of the Simvue server and provide an access token used to authenticate to the server. This can be done by either creating a `simvue.toml` file containing the required details, or specifying them as environment variables.\n", + "\n", + "Login to https://uk.simvue.io, go to the **Runs** page and click **Create new run**. Copy the 'token' from here. The run the cell below, paste the token into the box when prompted and push enter." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import getpass\n", + "\n", + "os.environ[\"SIMVUE_URL\"] = \"https://uk.simvue.io\"\n", + "os.environ[\"SIMVUE_TOKEN\"] = getpass.getpass(prompt=\"Token: \")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Example Simulation - Random Numbers\n", + "As a simple example, we are going to create a piece of code which generates a sequence of random numbers over time, and calculates the mean and median values. We will want to track how these averages vary over time, and have Simvue trigger an alert if they fall outside of expected parameters.\n", + "\n", + "The first thing we want to do is initialize our Simvue run. 
To do this we import the `Run` object from Simvue, and use it as a context manager:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from simvue import Run\n", + "import time\n", + "import random\n", + "import numpy\n", + "\n", + "with Run() as run:\n", + " # Initialize a run on the server, optionally providing a name, tags, folder etc\n", + " run.init(\n", + " name=\"random-numbers-example-%d\" % time.time(),\n", + " tags=[\"example\", \"random-numbers\"],\n", + " folder=\"/examples\",\n", + " )\n", + " \n", + " # Initialise an empty array which expects integers to store our random numbers in\n", + " all_numbers = numpy.array([], dtype=numpy.int64) \n", + " \n", + " # Create our 'simulation' to track\n", + " for i in range(0, 120):\n", + " \n", + " # Generate random numbers and find the averages\n", + " random_number = random.randint(0, 10)\n", + " all_numbers = numpy.append(all_numbers, random_number)\n", + " mean = float(numpy.average(all_numbers))\n", + " median = int(numpy.median(all_numbers)) \n", + " \n", + " # We can then use simvue to track the values of these metrics\n", + " run.log_metrics(\n", + " {\n", + " \"random_number\": random_number,\n", + " \"average.mean\": mean,\n", + " \"average.median\": median\n", + " }\n", + " )\n", + " time.sleep(1) \n", + " \n", + " # Once complete, we can add a message to the events log\n", + " run.log_event(\"Random number generation is complete!\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Results\n", + "You can view the results of this by logging into the web UI and viewing the run at the link above. You should be able to see a new run has been created and metrics from the above simulation are updating live as the simulation progresses.\n", + "\n", + "This only scratches the surface of what Simvue can do! 
You can also:\n", + "- Upload input, output or code files for storage as Artifacts\n", + "- Upload Python objects such as Numpy arrays or dictionaries for storage\n", + "- Add tags and metadata for easier categorisation and filtering of runs\n", + "- Setup alerts based on metrics or events which will inform you if things go wrong\n", + "- Track the carbon emissions associated with your simulations, so you can find ways to reduce them\n", + "- Run programs as subprocesses, allowing simvue to track their logs and alert the user if they fail\n", + "- Track outputs from non-Python programs using the [Multiparser](https://github.com/ukaea/Multiparser)\n", + "- Easily track outputs from common software packages using the custom [Integrations](https://github.com/simvue-io/integrations)\n", + "\n" + ] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "name": "colab-github-demo.ipynb", + "provenance": [], + "version": "0.3.2" + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/notebooks/detailed_example.ipynb b/notebooks/detailed_example.ipynb new file mode 100644 index 00000000..909fac6a --- /dev/null +++ b/notebooks/detailed_example.ipynb @@ -0,0 +1,308 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "

\n", + " \n", + " \n", + " \n", + " \"Simvue\"\n", + " \n", + "

\n", + "\n", + "# Detailed Example using Simpy\n", + "This is a more detailed example of using Simvue to track and monitor a simulation. In this case we are going to use a package called Simpy to models a bank counter and customers arriving at random times. Each customer has a certain patience. They wait to get to the counter until they're at the end of their tether. If they get to the counter, they uses it for a while before releasing it for the next customer to use.\n", + "\n", + "This is based on the Bank Renege example from the Simpy documentation - [see the full example here](https://simpy.readthedocs.io/en/latest/examples/bank_renege.html)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Install dependencies\n", + "Install any dependencies if you have not already done so:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip install simvue simpy numpy" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "### Initialisation\n", + "To proceed you need to specify the URL of the Simvue server and provide an access token used to authenticate to the server. This can be done by either creating a `simvue.toml` file containing the required details, or specifying them as environment variables.\n", + "\n", + "Login to https://uk.simvue.io, go to the **Runs** page and click **Create new run**. Copy the 'token' from here. The run the cell below, paste the token into the box when prompted and push enter." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import getpass\n", + "\n", + "os.environ[\"SIMVUE_URL\"] = \"https://uk.simvue.io\"\n", + "os.environ[\"SIMVUE_TOKEN\"] = getpass.getpass(prompt=\"Token: \")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Creating the Model\n", + "Now we are going to create our simulation of the bank. Firstly, we will import our required modules and define some constants which will be used throughout the simulation:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import random\n", + "import simpy\n", + "import simvue\n", + "import numpy\n", + "import time\n", + "\n", + "RANDOM_SEED = 42 # This makes the simulation reproducible - change it to get a new, randomised simulation\n", + "NEW_CUSTOMERS = 40 # Total number of customers\n", + "INTERVAL_CUSTOMERS = 10.0 # Generate new customers roughly every x seconds\n", + "MIN_PATIENCE = 1 # Minimum customer patience (seconds)\n", + "MAX_PATIENCE = 5 # Maximmum customer patience (seconds)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We then create a function which defines the behaviour of each customer, passing in the following parameters:\n", + "* **env**: The simulation environment.\n", + "* **name**: The customer’s name.\n", + "* **counter**: The resource representing the bank counter.\n", + "* **time_in_bank**: Average time a customer spends at the counter.\n", + "* **run**: The Simvue Run object for tracking the simulation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def customer(env, name, counter, time_in_bank, run):\n", + " \"\"\"Customer arrives, is served and leaves.\"\"\"\n", + " arrive = env.now\n", + " # Log an event with Simvue for when each customer arrives at the bank\n", + " 
run.log_event(f'{arrive:7.4f} {name}: Here I am!')\n", + "\n", + " # The customer requests to access the counter\n", + " with counter.request() as req:\n", + " patience = random.uniform(MIN_PATIENCE, MAX_PATIENCE)\n", + " \n", + " # Wait for the counter to become available, or abort once the customer has ran out of patience\n", + " results = yield req | env.timeout(patience)\n", + "\n", + " # Record how long they waited at the counter\n", + " wait = env.now - arrive\n", + "\n", + " if req in results:\n", + " # The customer got to the counter\n", + " \n", + " # Log an event to show that they have been served\n", + " run.log_event(f'{env.now:7.4f} {name}: SERVED after {wait:6.3f}')\n", + " \n", + " # The customer then spends a random amount of time at the counter (exponential distribution around the average time we specified)\n", + " tib = random.expovariate(1.0 / time_in_bank)\n", + " yield env.timeout(tib)\n", + " \n", + " # Log an event once they have finished being served\n", + " run.log_event(f'{env.now:7.4f} {name}: Finished')\n", + "\n", + " else:\n", + " # The customer gave up - increment counter and log an event\n", + " env.reneged_customers += 1\n", + " run.log_event(f'{env.now:7.4f} {name}: RENEGED after {wait:6.3f}')\n", + "\n", + " # Update statistics - record wait time, average wait time for all customers, and percentage who reneged\n", + " env.wait_times = numpy.append(env.wait_times, wait)\n", + " _average_wait = numpy.mean(env.wait_times)\n", + " _percentage_reneged = env.reneged_customers / env.total_customers * 100\n", + " \n", + " # Log these statistics as metrics to Simvue\n", + " run.log_metrics({\"percentage_reneged\": _percentage_reneged, \"average_wait\": _average_wait}, time=env.now)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We then define a source function - this is used to generate our bank customers at semi random intervals. 
We pass in the following variables to this function:\n", + "* **env**: The simulation environment.\n", + "* **number**: Number of customers to generate.\n", + "* **interval**: Average interval between customer arrivals.\n", + "* **counter**: The resource representing the bank counter.\n", + "* **run**: The Simvue Run object for tracking the simulation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def source(env, number, interval, counter, run):\n", + " \"\"\"Source generates customers randomly\"\"\"\n", + " # Generate a new customer, process it, and then wait for a random length of time before creating another one\n", + " for i in range(number):\n", + " env.total_customers += 1\n", + " c = customer(env, f'Customer{i:02d}', counter, time_in_bank=12.0, run=run)\n", + " env.process(c)\n", + " t = random.expovariate(1.0 / interval)\n", + " yield env.timeout(t)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next we want to set up our Simvue run and start the simulation. To do this we use the `Run` class from Simvue as a context manager, and call the `init` method. 
We then add any additional information we want to store, before running the simulation:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Setup the simulation (will run it in real time)\n", + "random.seed(RANDOM_SEED)\n", + "env = simpy.rt.RealtimeEnvironment(factor=1, strict=False)\n", + "\n", + "# Initialize statisticss as part of the env object\n", + "env.total_customers = 0\n", + "env.reneged_customers = 0\n", + "env.wait_times = numpy.array([])\n", + "\n", + "# Start Simvue run as a context manager and initialize the run\n", + "with simvue.Run() as run:\n", + " run.init(\n", + " name=\"bank-customers-example-%d\" % time.time(),\n", + " folder=\"/examples\",\n", + " description=\"Simulate customers being served at a bank, recording the wait times and percentage who don't get served.\",\n", + " tags=[\"example\", \"bank-customers\"],\n", + " notification=\"all\"\n", + " )\n", + " \n", + " # Upload metadata which corresponds to the variables we defined at the beginning\n", + " run.update_metadata(\n", + " {\n", + " \"random_seed\": RANDOM_SEED,\n", + " \"num_customers\": NEW_CUSTOMERS,\n", + " \"average_customer_interval\": INTERVAL_CUSTOMERS,\n", + " \"customer_min_patience\": MIN_PATIENCE,\n", + " \"customer_max_patience\": MAX_PATIENCE\n", + " }\n", + " )\n", + " \n", + " # Upload this file as a code artifact\n", + " run.save_file(os.path.join(os.getcwd(), \"simvue_detailed_example.ipynb\"), category=\"code\")\n", + " \n", + " # Add some alerts so that we can be notified if things go wrong\n", + " \n", + " # For example, could add an Event based alert which is triggered when a customer gives up\n", + " run.create_event_alert(\n", + " name=\"customer_reneged\",\n", + " pattern=\"RENEGED\",\n", + " description=\"A bank customer gave up before being served!\"\n", + " )\n", + " # Or a Metric based alert which is triggered when the percentage reneged is above 40%\n", + " 
run.create_metric_threshold_alert(\n", + " name=\"customer_reneged_above_40_percent\",\n", + " metric=\"percentage_reneged\",\n", + " threshold=40,\n", + " rule=\"is above\",\n", + " description=\"More than 40 percent of customers are giving up before being served!\",\n", + " notification=\"email\",\n", + " window=1\n", + " )\n", + " \n", + " # Start processes and run the simulation\n", + " counter = simpy.Resource(env, capacity=1)\n", + " env.process(source(env, NEW_CUSTOMERS, INTERVAL_CUSTOMERS, counter, run))\n", + " env.run()\n", + " \n", + " # Once simulation is complete, save our array of wait times as an output Artifact\n", + " run.save_object(env.wait_times, category='output', name='wait_times')\n", + " \n", + " # Let's say if more than 50% of customers gave up before being served, our run is a failure\n", + " if (env.reneged_customers / env.total_customers) > 0.5:\n", + " run.set_status(\"failed\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Results\n", + "That's it! You can now view the run in the Simvue UI by clicking on the link above. 
You should be able to see:\n", + "* A new run has been created in the `/examples` folder, with the name, tags and description which we specified in the `init` method\n", + "* The run has a set of metadata detailing the variables we used in our simulation, along with some automatically collected information like the Python environment used\n", + "* This notebook has been uploaded as a Code artifact, and once the simulation has finished our array of wait times is uploaded as an Output artifact\n", + "* There are two metrics, `average_wait` and `percentage_reneged`, which are updating live as the simulation progresses\n", + "* The events log shows each customer arriving, waiting, and either being served or reneging\n", + "* There are two alerts:\n", + " - One based on the events log, which should fire near the start of the run when the first customer gives up without being served\n", + " - One based on the `percentage_reneged` metric, which fires near the end of the simulation when the percentage of customers who reneged (time averaged over the last minute) reached 40%. This one should also send you an email\n", + "* The run's status is set to 'failed' if the final percentage of customers giving up exceeds 50%, otherwise it is 'completed'\n", + "* You received an email when the run finished, telling you that it failed\n", + "\n", + "Try tweaking the input parameters, and see what effect it has! Compare different runs easily using the Simvue web UI, with the ability to filter based on things like run status, tags, and metadata to identify the runs you care about. 
You can also create custom plots to visualise your results.

\n", + " \n", + " \n", + " \n", + " \"Simvue\"\n", + " \n", + "

\n", + "\n", + "# Example tracking a Non-Python Simulation\n", + "This is an example of how you can use Simvue with Multiparser to track and monitor a non-Python simulation or other computational task. We will use a simple bash script to create some dummy 'temperature' data, looking at an experiment where the sample which is heated in an electric oven (causing its temperature to increase linearly), and then is taken out of the oven to cool down (losing temperature exponentially). We will then track this using the output file which is created during the experiment.\n", + "\n", + "Note that for some common simulation softwares, we already have custom made integrations packages which can be used. These include:\n", + "* MOOSE (Multiphysics Object Oriented Simulation Environment)\n", + "* OpenFOAM\n", + "* FDS (Fire Dynamics Simulator)\n", + "* TensorFlow\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Install dependencies\n", + "While you can use Simvue and Multiparser individually to achieve the tracking and monitoring of Non-Python files, we recommend using the `simvue-integrations` package which wraps these both together into a handy `WrappedRun` class. Install this if you have not done so already:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip install git+https://github.com/simvue-io/integrations.git" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Initialisation\n", + "To proceed you need to specify the URL of the Simvue server and provide an access token used to authenticate to the server. This can be done by either creating a `simvue.toml` file containing the required details, or specifying them as environment variables.\n", + "\n", + "Login to https://uk.simvue.io, go to the **Runs** page and click **Create new run**. Copy the 'token' from here. 
Then run the cell below, paste the token into the box when prompted and push enter.
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from simvue_integrations.connectors.generic import WrappedRun\n", + "import multiparser.parsing.tail as mp_tail_parser\n", + "import time\n", + "import pathlib\n", + "\n", + "# Create a new class which inherits from WrappedRun\n", + "class TemperatureRun(WrappedRun):\n", + " script_path: pathlib.Path = None\n", + " \n", + " # Override the `_pre_simulation` method to launch the process\n", + " def _pre_simulation(self):\n", + " # Call the base method first\n", + " super()._pre_simulation()\n", + " \n", + " # Add a process to the run using `add_process`\n", + " self.add_process(\n", + " identifier=\"heating_experiment\",\n", + " executable=\"bash\",\n", + " script=self.script_path,\n", + " completion_trigger=self._trigger # Sets a multiprocessing Event once the simulation is completed\n", + " )\n", + " \n", + " # Override the `_during_simulation` method to track the temperature data\n", + " def _during_simulation(self):\n", + " # Use the `tail` method of the Multiparser `FileMonitor` object to track file, line by line\n", + " self.file_monitor.tail(\n", + " path_glob_exprs=str(self.script_path.with_suffix(\".csv\")),\n", + " parser_func=mp_tail_parser.record_csv, # Use the built-in CSV parser, which returns a dictionary of data and metadata as each line is written\n", + " callback=lambda csv_data, metadata: self.log_metrics( # Use data from those two dictionaries to log a metric:\n", + " {'sample_temperature': csv_data[\"Temperature\"]},\n", + " time=csv_data[\"Time\"], \n", + " step=csv_data[\"Step\"], \n", + " ) \n", + " )\n", + " \n", + " # Override the `launch` method to accept the path to the bash script\n", + " def launch(self, script_path: str):\n", + " self.script_path = script_path\n", + " # Call the base `launch` method to call the above methods in the correct order\n", + " super().launch()\n", + " " + ] + }, + { + "cell_type": "markdown", + 
"metadata": {}, + "source": [ + "### Using our TemperatureRun class\n", + "We can then use our `TemperatureRun` class in the same way as we would use the Simvue `Run` class - use it as a context manager, and call the `init` method. We can then add any additional information we want to store, before running the simulation by calling the `launch` method:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with TemperatureRun() as run:\n", + " \n", + " run.init(\n", + " name=\"heating-cooling-example-%d\" % time.time(),\n", + " folder=\"/examples\",\n", + " description=\"Simulate an experiment where a sample is heated and then left to cool, tracking the temperature.\",\n", + " tags=[\"example\", \"heating-cooling\"],\n", + " )\n", + " \n", + " # Can upload extra things we care about, eg could upload some metadata\n", + " run.update_metadata(\n", + " {\n", + " \"initial_temperature\": 20,\n", + " \"heating_time\": 50,\n", + " \"cooling_time\": 100\n", + " }\n", + " )\n", + " # Then run launch to start the experiment\n", + " run.launch(pathlib.Path.cwd().joinpath(\"temperatures.sh\"))\n", + " \n", + " # Then once complete, can upload any other information before closing the run\n", + " run.save_file(pathlib.Path.cwd().joinpath(\"temperatures.csv\"), category=\"output\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Results\n", + "That's it! You can now view the run in the Simvue UI by clicking on the link above. 
You should be able to see:\n", + "* A new run has been created in the `/examples` folder, with the name, tags and description which we specified in the `init` method\n", + "* The run has a set of metadata detailing some of our inputs to the simulation\n", + "* The simulation was automatically launched as a Simvue process\n", + "* The bash script used was uploaded as a Code artifact\n", + "* The temperature is being parsed from the CSV file and uploaded as a metric in real time\n", + "* Once complete, the CSV results file is uploaded as an Output artifact" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/simvue_bluemira_example.ipynb b/notebooks/simvue_bluemira_example.ipynb deleted file mode 100644 index 766103a7..00000000 --- a/notebooks/simvue_bluemira_example.ipynb +++ /dev/null @@ -1,767 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "view-in-github" - }, - "source": [ - "\"Open" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "t8naAmYqvOcG" - }, - "source": [ - "\"Simvue\"\n", - "\n", - "# Simple example using Bluemira to optimise equilibrium of plasma and TF-coil shape\n", - "\n", - "Simplistic Reactor Design\n", - "This example show hows to set up a simple reactor, consisting of a plasma and a single TF coil. 
The TF coil will be optimised such that its length is minimised, whilst maintaining a minimum distance to the plasma.\n", - "\n", - "To do this we'll run through how to set up the parameters for the build, how to define the Builders and Designers (including the optimisation problem) for the plasma and TF coil, and how to run the build with configurable parameters.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ZY764bW-Y3Nd" - }, - "source": [ - "## Set an access token\n", - "A token needs to be specified in order to authenticate to the Simvue REST API. To obtain the token, login to https://app.simvue.io, go to the **Runs** page and click **Create new run**. Copy the token and paste it into the box when prompted and push enter.\n", - "\n", - "It is important to note that the token will not be saved in this notebook." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "WwNo_ja_45Zz" - }, - "outputs": [], - "source": [ - "import os\n", - "\n", - "os.environ[\"SIMVUE_URL\"] = \"https://app.simvue.io\"\n", - "os.environ[\"SIMVUE_TOKEN\"] = getpass.getpass(prompt=\"Token: \")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "h3m7b9siaHqG" - }, - "source": [ - "## Install dependencies" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "6iixvs2NWeso" - }, - "outputs": [], - "source": [ - "!pip install tensorflow simvue" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-MKgsIcIaL4R" - }, - "source": [ - "##\u00a0The code" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "SLDZTHMgv5P8" - }, - "outputs": [], - "source": [ - "from __future__ import absolute_import, division, print_function\n", - "\n", - "import os\n", - "\n", - "from dataclasses import dataclass\n", - "from typing import Callable, Dict\n", - "\n", - "import numpy as np\n", - "\n", - "from bluemira.base.builder import Builder, ComponentManager\n", - 
"from bluemira.base.components import Component, PhysicalComponent\n", - "from bluemira.base.designer import Designer\n", - "from bluemira.base.parameter_frame import Parameter, ParameterFrame\n", - "from bluemira.base.reactor import Reactor\n", - "from bluemira.display.palettes import BLUE_PALETTE\n", - "from bluemira.equilibria.shapes import JohnerLCFS\n", - "from bluemira.geometry.face import BluemiraFace\n", - "from bluemira.geometry.optimisation import GeometryOptimisationProblem, minimise_length\n", - "from bluemira.geometry.parameterisations import GeometryParameterisation\n", - "from bluemira.geometry.tools import (\n", - " distance_to,\n", - " make_polygon,\n", - " offset_wire,\n", - " revolve_shape,\n", - " sweep_shape,\n", - ")\n", - "from bluemira.geometry.wire import BluemiraWire\n", - "from bluemira.utilities.opt_problems import (\n", - " OptimisationConstraint,\n", - " OptimisationObjective,\n", - ")\n", - "from bluemira.utilities.optimiser import Optimiser, approx_derivative\n", - "from bluemira.utilities.tools import get_class_from_module" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "jz0hbM15wB5D" - }, - "outputs": [], - "source": [ - "# Firstly we need to define the parameters we're going to use in our reactor design for each component.\n", - "@dataclass\n", - "class PlasmaDesignerParams(ParameterFrame):\n", - " \"\"\"Plasma Designer ParameterFrame\"\"\"\n", - "\n", - " R_0: Parameter[float]\n", - " A: Parameter[float]\n", - " z_0: Parameter[float]\n", - " kappa_u: Parameter[float]\n", - " kappa_l: Parameter[float]\n", - " delta_u: Parameter[float]\n", - " delta_l: Parameter[float]\n", - " phi_neg_u: Parameter[float]\n", - " phi_pos_u: Parameter[float]\n", - " phi_pos_l: Parameter[float]\n", - " phi_neg_l: Parameter[float]\n", - "\n", - "\n", - "@dataclass\n", - "class TFCoilBuilderParams(ParameterFrame):\n", - " \"\"\"TF Coil Builder ParameterFrame\"\"\"\n", - "\n", - " tf_wp_width: Parameter[float]\n", 
- " tf_wp_depth: Parameter[float]" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "iRyGA15Hz-Us" - }, - "source": [ - "To manage access to properties of the components we need some ComponentManagers" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "wtj5DEbU0ESg" - }, - "outputs": [], - "source": [ - "class Plasma(ComponentManager):\n", - " \"\"\"Plasma manager\"\"\"\n", - "\n", - " def lcfs(self):\n", - " \"\"\"Get separatrix\"\"\"\n", - " return (\n", - " self.component().get_component(\"xz\").get_component(\"LCFS\").shape.boundary[0]\n", - " )\n", - "\n", - "\n", - "class TFCoil(ComponentManager):\n", - " \"\"\"TF Coil manager\"\"\"\n", - "\n", - " def wp_volume(self):\n", - " \"\"\"Get winding pack volume\"\"\"\n", - " return (\n", - " self.component()\n", - " .get_component(\"xyz\")\n", - " .get_component(\"Winding pack\")\n", - " .shape.volume()\n", - " )" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "wnW3BQFW0FRL" - }, - "source": [ - "We then need a reactor in which to store the components. Notice that the typing of the components here is the relevent ComponentManager" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "odufhcRM0N5w" - }, - "outputs": [], - "source": [ - "class MyReactor(Reactor):\n", - " \"\"\"Reactor container\"\"\"\n", - "\n", - " plasma: Plasma\n", - " tf_coil: TFCoil" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "6wkTFPO80Oqg" - }, - "source": [ - "Now we want to define a way to optimise the TF coil shape. We want to minimise the length of the TF coil, constraining the optimiser such that the any part of the coil is always a minimum distance away from the plasma.\n", - "\n", - "Further information on geometry can be found in the geometry tutorial and information about geometry optimisation can be found in the geometry optimisation tutorial." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "v61OaPfc0eDO" - }, - "outputs": [], - "source": [ - "class MyTFCoilOptProblem(GeometryOptimisationProblem):\n", - " \"\"\"\n", - " A simple geometry optimisation problem for the TF coil current centreline\n", - "\n", - " Here we:\n", - "\n", - " minimise: length\n", - " subject to:\n", - " min_distance_to_LCFS >= min_distance\n", - " \"\"\"\n", - "\n", - " def __init__(\n", - " self,\n", - " geometry_parameterisation: GeometryParameterisation,\n", - " lcfs: BluemiraWire,\n", - " optimiser: Optimiser,\n", - " min_distance: float,\n", - " ):\n", - " objective = OptimisationObjective(\n", - " minimise_length,\n", - " f_objective_args={\"parameterisation\": geometry_parameterisation},\n", - " )\n", - " constraints = [\n", - " OptimisationConstraint(\n", - " self.f_constraint,\n", - " f_constraint_args={\n", - " \"parameterisation\": geometry_parameterisation,\n", - " \"lcfs\": lcfs,\n", - " \"min_distance\": min_distance,\n", - " \"ad_args\": {},\n", - " },\n", - " tolerance=1e-6,\n", - " constraint_type=\"inequality\",\n", - " )\n", - " ]\n", - " super().__init__(\n", - " geometry_parameterisation, optimiser, objective, constraints=constraints\n", - " )\n", - "\n", - " @staticmethod\n", - " def constraint_value(\n", - " vector: np.ndarray,\n", - " parameterisation: GeometryParameterisation,\n", - " lcfs: BluemiraWire,\n", - " min_distance: float,\n", - " ):\n", - " \"\"\"\n", - " The constraint evaluation function\n", - " \"\"\"\n", - " parameterisation.variables.set_values_from_norm(vector)\n", - " shape = parameterisation.create_shape()\n", - " return min_distance - distance_to(shape, lcfs)[0]\n", - "\n", - " @staticmethod\n", - " def f_constraint(\n", - " constraint: Callable,\n", - " vector: np.ndarray,\n", - " grad: np.ndarray,\n", - " parameterisation: GeometryParameterisation,\n", - " lcfs: BluemiraWire,\n", - " min_distance: float,\n", - " ad_args=None,\n", - " ):\n", - " 
\"\"\"\n", - " Constraint function\n", - " \"\"\"\n", - " tffunction = MyTFCoilOptProblem.constraint_value\n", - " constraint[:] = tffunction(vector, parameterisation, lcfs, min_distance)\n", - " if grad.size > 0:\n", - " grad[:] = approx_derivative(\n", - " tffunction,\n", - " vector,\n", - " f0=constraint,\n", - " args=(parameterisation, lcfs, min_distance),\n", - " bounds=[0, 1],\n", - " )\n", - " return constraint\n", - "\n", - " def optimise(self, x0=None):\n", - " \"\"\"\n", - " Run the optimisation problem.\n", - " \"\"\"\n", - " return super().optimise(x0)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "jZr76JfC0j7t" - }, - "source": [ - "We need to define some Designers and Builders for our various Components.\n", - "\n", - "Firstly the plasma. The plasma designer will, using its ParameterFrame, evaluate a JohnerLCFS geometry parameterisation, returning a wire representing the plasma's last-closed-flux-surface (LCFS).\n", - "\n", - "In this case PlasmaDesigner has some required parameters but PlasmaBuilder does not" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "F15mp4-B0pqo" - }, - "outputs": [], - "source": [ - "class PlasmaDesigner(Designer):\n", - " \"\"\"Design a plasma's LCFS using a Johner paramterisation.\"\"\"\n", - "\n", - " param_cls = PlasmaDesignerParams\n", - "\n", - " def run(self) -> GeometryParameterisation:\n", - " \"\"\"Build the LCFS, returning a closed wire defining its outline.\"\"\"\n", - " return self._build_wire(self.params)\n", - "\n", - " @staticmethod\n", - " def _build_wire(params: PlasmaDesignerParams) -> GeometryParameterisation:\n", - " return JohnerLCFS(\n", - " var_dict={\n", - " \"r_0\": {\"value\": params.R_0.value},\n", - " \"z_0\": {\"value\": params.z_0.value},\n", - " \"a\": {\"value\": params.R_0.value / params.A.value},\n", - " \"kappa_u\": {\"value\": params.kappa_u.value},\n", - " \"kappa_l\": {\"value\": params.kappa_l.value},\n", - " \"delta_u\": 
{\"value\": params.delta_u.value},\n", - " \"delta_l\": {\"value\": params.delta_l.value},\n", - " \"phi_u_pos\": {\"value\": params.phi_pos_u.value, \"lower_bound\": 0.0},\n", - " \"phi_u_neg\": {\"value\": params.phi_neg_u.value, \"lower_bound\": 0.0},\n", - " \"phi_l_pos\": {\"value\": params.phi_pos_l.value, \"lower_bound\": 0.0},\n", - " \"phi_l_neg\": {\n", - " \"value\": params.phi_neg_l.value,\n", - " \"lower_bound\": 0.0,\n", - " \"upper_bound\": 90,\n", - " },\n", - " }\n", - " )\n", - "\n", - "\n", - "class PlasmaBuilder(Builder):\n", - " \"\"\"Build the 3D geometry of a plasma from a given LCFS.\"\"\"\n", - "\n", - " param_cls = None\n", - "\n", - " def __init__(self, wire: BluemiraWire, build_config: Dict):\n", - " super().__init__(None, build_config)\n", - " self.wire = wire\n", - "\n", - " def build(self) -> Plasma:\n", - " \"\"\"\n", - " Run the full build of the Plasma\n", - " \"\"\"\n", - " xz = self.build_xz()\n", - " return Plasma(\n", - " self.component_tree(\n", - " xz=[xz],\n", - " xy=[Component(\"\")],\n", - " xyz=[self.build_xyz(xz.shape)],\n", - " )\n", - " )\n", - "\n", - " def build_xz(self) -> PhysicalComponent:\n", - " \"\"\"\n", - " Build a view of the plasma in the toroidal (xz) plane.\n", - "\n", - " This generates a ``PhysicalComponent``, whose shape is a face.\n", - " \"\"\"\n", - " component = PhysicalComponent(\"LCFS\", BluemiraFace(self.wire))\n", - " component.display_cad_options.color = BLUE_PALETTE[\"PL\"]\n", - " component.display_cad_options.transparency = 0.5\n", - " return component\n", - "\n", - " def build_xyz(self, lcfs: BluemiraFace) -> PhysicalComponent:\n", - " \"\"\"\n", - " Build the 3D (xyz) Component of the plasma by revolving the given face\n", - " 360 degrees.\n", - " \"\"\"\n", - " shape = revolve_shape(lcfs, degree=359)\n", - " component = PhysicalComponent(\"LCFS\", shape)\n", - " component.display_cad_options.color = BLUE_PALETTE[\"PL\"]\n", - " component.display_cad_options.transparency = 0.5\n", - " 
return component" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "EsDh8hCg0qgs" - }, - "source": [ - "And now the TF Coil, in this instance for simplicity we are only making one TF coil.\n", - "\n", - "The TF coil designer is finding the given geometry parameterisation given a string in the build_config which should point to a class. The parameterisation is then fed into the optimisation problem we made earlier. Finally when the designer is executed the optimisation problem is run to generate the centreline wire of the coil.\n", - "\n", - "The TF coil builder then is passed the centreline from the designer to create the Component and therefore the CAD of the TF coil. If more TF coils were to be required the build_xyz of TFCoilBuilder would need to be modified.\n", - "\n", - "Notice that only TFCoilBuilder has required parameters in this case." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "mZn-xn2G1EoJ" - }, - "outputs": [], - "source": [ - "class TFCoilDesigner(Designer):\n", - " \"\"\"TF coil Designer\"\"\"\n", - "\n", - " param_cls = None # This designer takes no parameters\n", - "\n", - " def __init__(self, plasma_lcfs, params, build_config):\n", - " super().__init__(params, build_config)\n", - " self.lcfs = plasma_lcfs\n", - " self.parameterisation_cls = get_class_from_module(\n", - " self.build_config[\"param_class\"],\n", - " default_module=\"bluemira.geometry.parameterisations\",\n", - " )\n", - "\n", - " def run(self) -> GeometryParameterisation:\n", - " \"\"\"TF coil run method\"\"\"\n", - " parameterisation = self.parameterisation_cls(\n", - " var_dict=self.build_config[\"var_dict\"]\n", - " )\n", - " my_tf_coil_opt_problem = MyTFCoilOptProblem(\n", - " parameterisation,\n", - " self.lcfs,\n", - " optimiser=Optimiser(\n", - " \"SLSQP\", opt_conditions={\"max_eval\": 5000, \"ftol_rel\": 1e-6}\n", - " ),\n", - " min_distance=1.0, # the coil must be >= 1 meter from the LCFS\n", - " )\n", - " return 
my_tf_coil_opt_problem.optimise()\n", - "\n", - "\n", - "class TFCoilBuilder(Builder):\n", - " \"\"\"\n", - " Build a 3D model of a TF Coil from a given centre line\n", - " \"\"\"\n", - "\n", - " param_cls = TFCoilBuilderParams\n", - "\n", - " def __init__(self, params, centreline):\n", - " super().__init__(params, {})\n", - " self.centreline = centreline\n", - "\n", - " def make_tf_wp_xs(self) -> BluemiraWire:\n", - " \"\"\"\n", - " Make a wire for the cross-section of the winding pack in xy.\n", - " \"\"\"\n", - " width = 0.5 * self.params.tf_wp_width.value\n", - " depth = 0.5 * self.params.tf_wp_depth.value\n", - " wire = make_polygon(\n", - " {\n", - " \"x\": [-width, width, width, -width],\n", - " \"y\": [-depth, -depth, depth, depth],\n", - " \"z\": 0.0,\n", - " },\n", - " closed=True,\n", - " )\n", - " return wire\n", - "\n", - " def build(self) -> TFCoil:\n", - " \"\"\"\n", - " Run the full build for the TF coils.\n", - " \"\"\"\n", - " return TFCoil(\n", - " self.component_tree(\n", - " xz=[self.build_xz()],\n", - " xy=[Component(\"\")],\n", - " xyz=[self.build_xyz()],\n", - " )\n", - " )\n", - "\n", - " def build_xz(self) -> PhysicalComponent:\n", - " \"\"\"\n", - " Build the xz Component of the TF coils.\n", - " \"\"\"\n", - " inner = offset_wire(self.centreline, -0.5 * self.params.tf_wp_width.value)\n", - " outer = offset_wire(self.centreline, 0.5 * self.params.tf_wp_width.value)\n", - " return PhysicalComponent(\"Winding pack\", BluemiraFace([outer, inner]))\n", - "\n", - " def build_xyz(self) -> PhysicalComponent:\n", - " \"\"\"\n", - " Build the xyz Component of the TF coils.\n", - " \"\"\"\n", - " wp_xs = self.make_tf_wp_xs()\n", - " wp_xs.translate((self.centreline.bounding_box.x_min, 0, 0))\n", - " volume = sweep_shape(wp_xs, self.centreline)\n", - " return PhysicalComponent(\"Winding pack\", volume)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "GF3PaI4w1Ft0" - }, - "source": [ - "Now let us setup our build configuration. 
This could be stored as a JSON file and read in but for simplicity it is all written here. Notice there are no 'global' parameters as neither of the components share a variable." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "n566kejh1NE2" - }, - "outputs": [], - "source": [ - "build_config = {\n", - " # This reactor has no global parameters, but this key would usually\n", - " # be used to set parameters that are shared between components\n", - " \"params\": {},\n", - " \"Plasma\": {\n", - " \"Designer\": {\n", - " \"params\": {\n", - " \"R_0\": {\n", - " \"value\": 9.0,\n", - " \"unit\": \"m\",\n", - " \"source\": \"Input\",\n", - " \"long_name\": \"Major radius\",\n", - " },\n", - " \"z_0\": {\n", - " \"value\": 0.0,\n", - " \"unit\": \"m\",\n", - " \"source\": \"Input\",\n", - " \"long_name\": \"Reference vertical coordinate\",\n", - " },\n", - " \"A\": {\n", - " \"value\": 3.1,\n", - " \"unit\": \"dimensionless\",\n", - " \"source\": \"Input\",\n", - " \"long_name\": \"Aspect ratio\",\n", - " },\n", - " \"kappa_u\": {\n", - " \"value\": 1.6,\n", - " \"unit\": \"dimensionless\",\n", - " \"source\": \"Input\",\n", - " \"long_name\": \"Upper elongation\",\n", - " },\n", - " \"kappa_l\": {\n", - " \"value\": 1.8,\n", - " \"unit\": \"dimensionless\",\n", - " \"source\": \"Input\",\n", - " \"long_name\": \"Lower elongation\",\n", - " },\n", - " \"delta_u\": {\n", - " \"value\": 0.4,\n", - " \"unit\": \"dimensionless\",\n", - " \"source\": \"Input\",\n", - " \"long_name\": \"Upper triangularity\",\n", - " },\n", - " \"delta_l\": {\n", - " \"value\": 0.4,\n", - " \"unit\": \"dimensionless\",\n", - " \"source\": \"Input\",\n", - " \"long_name\": \"Lower triangularity\",\n", - " },\n", - " \"phi_neg_u\": {\"value\": 0, \"unit\": \"degree\", \"source\": \"Input\"},\n", - " \"phi_pos_u\": {\"value\": 0, \"unit\": \"degree\", \"source\": \"Input\"},\n", - " \"phi_neg_l\": {\"value\": 0, \"unit\": \"degree\", \"source\": 
\"Input\"},\n", - " \"phi_pos_l\": {\"value\": 0, \"unit\": \"degree\", \"source\": \"Input\"},\n", - " },\n", - " },\n", - " },\n", - " \"TF Coil\": {\n", - " \"Designer\": {\n", - " \"runmode\": \"run\",\n", - " \"param_class\": \"PrincetonD\",\n", - " \"var_dict\": {\n", - " \"x1\": {\"value\": 3.0, \"fixed\": True},\n", - " \"x2\": {\"value\": 15, \"lower_bound\": 12},\n", - " },\n", - " },\n", - " \"Builder\": {\n", - " \"params\": {\n", - " \"tf_wp_width\": {\n", - " \"value\": 0.6,\n", - " \"unit\": \"m\",\n", - " \"source\": \"Input\",\n", - " \"long_name\": \"Width of TF coil winding pack\",\n", - " },\n", - " \"tf_wp_depth\": {\n", - " \"value\": 0.8,\n", - " \"unit\": \"m\",\n", - " \"source\": \"Input\",\n", - " \"long_name\": \"Depth of TF coil winding pack\",\n", - " },\n", - " },\n", - " },\n", - " },\n", - "}" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "kpvU4axY1OZM" - }, - "source": [ - "Now we set up our ParamterFrames" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ZTcNSYdK1R2V" - }, - "outputs": [], - "source": [ - "# TODO improve build config manipulation\n", - "plasma_params = PlasmaDesignerParams.from_dict(\n", - " {**build_config[\"params\"], **build_config[\"Plasma\"][\"Designer\"].pop(\"params\")}\n", - ")\n", - "\n", - "tf_coil_params = TFCoilBuilderParams.from_dict(\n", - " {**build_config[\"params\"], **build_config[\"TF Coil\"][\"Builder\"].pop(\"params\")}\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "OmmSif8A1VOt" - }, - "source": [ - "We create our plasma" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "JPzQNKP51a5Q" - }, - "outputs": [], - "source": [ - "plasma_designer = PlasmaDesigner(plasma_params, build_config[\"Plasma\"])\n", - "plasma_parameterisation = plasma_designer.execute()\n", - "\n", - "plasma_builder = PlasmaBuilder(\n", - " plasma_parameterisation.create_shape(), 
build_config[\"Plasma\"]\n", - ")\n", - "plasma = plasma_builder.build()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "oeDmkGlf1bsn" - }, - "source": [ - "We create our TF coil" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Qd-LW3BC1epz" - }, - "outputs": [], - "source": [ - "tf_coil_designer = TFCoilDesigner(\n", - " plasma.lcfs(), None, build_config[\"TF Coil\"][\"Designer\"]\n", - ")\n", - "tf_parameterisation = tf_coil_designer.execute()\n", - "\n", - "tf_coil_builder = TFCoilBuilder(tf_coil_params, tf_parameterisation.create_shape())\n", - "tf_coil = tf_coil_builder.build()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ryT0YRKf1hQz" - }, - "source": [ - "Finally we add the components to the reactor and show the CAD" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "m1ges_Jk1kUP" - }, - "outputs": [], - "source": [ - "reactor = MyReactor(\"Simple Example\")\n", - "\n", - "reactor.plasma = plasma\n", - "reactor.tf_coil = tf_coil\n", - "\n", - "reactor.show_cad()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "5keqr7gsaTPa" - }, - "source": [ - "Once the optimisation above has started you can login to https://app.simvue.io, go to the **Runs** page and you should be able to find the currently running optimisation." 
- ] - } - ], - "metadata": { - "colab": { - "include_colab_link": true, - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - }, - "language_info": { - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/notebooks/simvue_colab_example.ipynb b/notebooks/simvue_colab_example.ipynb deleted file mode 100644 index ea43a3e3..00000000 --- a/notebooks/simvue_colab_example.ipynb +++ /dev/null @@ -1,184 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "view-in-github" - }, - "source": [ - "\"Open\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "-pVhOfzLx9us" - }, - "source": [ - "# Using Simvue in Google Colab\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Install any dependencies if you have not already done so e.g. simvue" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!pip install simvue" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import random\n", - "import time\n", - "from simvue import Run\n", - "\n", - "run = Run()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "wKJ4bd5rt1wy" - }, - "source": [ - "\n", - "### Initialisation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To proceed you need to specify the URL of the Simvue server and provide an access token used to authenticate to the server.\n", - "\n", - "Login to https://app.simvue.io, go to the **Runs** page and click **Create new run**. 
\n", - "\n", - "Set the environment variables using the values from the **Create new run** modal as follows" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "os.environ[\"SIMVUE_URL\"] = \"https://app.simvue.io\"\n", - "print(os.getenv(\"SIMVUE_URL\"))\n", - "\n", - "# The following avoids tokens being stored in the notebook\n", - "import getpass\n", - "\n", - "os.environ[\"SIMVUE_TOKEN\"] = getpass.getpass(prompt=\"Token: \")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Define the name of the run, metadata (key-value pairs) and tags (list of short labels). Metadata values are integers, floating point numbers or strings, and would typically be input or output parameters associated with a run. Runs can be filtered using both metadata and tags." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run.init(\n", - " name=\"hello-world-%d\" % time.time(),\n", - " tags=[\"test\"],\n", - " metadata={\"key1\": 1, \"key2\": \"hello\"},\n", - " folder=\"/tests\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Rmai0dD30XzL" - }, - "source": [ - "### Generate 10 random numbers, one per second" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "for count in range(0, 10):\n", - " run.log_metrics({\"random_number\": 10 * random.random()})\n", - " time.sleep(1)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "8J3NBxtZpPcK" - }, - "source": [ - "### Close the run" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "run.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### To view the results login to https://app.simvue.io" - ] - } - ], - "metadata": { - "colab": { - 
"collapsed_sections": [], - "name": "colab-github-demo.ipynb", - "provenance": [], - "version": "0.3.2" - }, - "kernelspec": { - "display_name": "Python 3.8.8 ('base')", - "language": "python", - "name": "python3" - }, - "language_info": { - "name": "python", - "version": "3.8.8" - }, - "vscode": { - "interpreter": { - "hash": "cfd12c460843f72ac31856d2a5b94956cfc657ba97314f23f6b0471b6cc99e27" - } - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/notebooks/simvue_tensorflow_example.ipynb b/notebooks/simvue_tensorflow_example.ipynb deleted file mode 100644 index 52ca543d..00000000 --- a/notebooks/simvue_tensorflow_example.ipynb +++ /dev/null @@ -1,385 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "view-in-github" - }, - "source": [ - "\"Open\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "t8naAmYqvOcG" - }, - "source": [ - "\"Simvue\"\n", - "\n", - "# Simple Tensorflow example\n", - "\n", - "Taken from https://github.com/aymericdamien/TensorFlow-Examples/\n", - "\n", - "> TensorFlow 2.0 implementation of a Recurrent Neural Network (LSTM) that performs dynamic computation over sequences with variable length. This example is using a toy dataset to classify linear sequences. The generated sequences have variable length.\n", - "\n", - "In this example we take an existing Python code and make some minor changes in order to use Simvue to:\n", - "\n", - "* Record dataset, training and network parameters as metadata,\n", - "* Log metrics while the code is running, in this case acurracy and loss" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ZY764bW-Y3Nd" - }, - "source": [ - "## Set an access token\n", - "A token needs to be specified in order to authenticate to the Simvue REST API. To obtain the token, login to https://app.simvue.io/runs and click **Create new run**. 
Copy the token and paste it into the box when prompted and push enter.\n", - "\n", - "It is important to note that the token will not be saved in this notebook." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "WwNo_ja_45Zz" - }, - "outputs": [], - "source": [ - "import os\n", - "import getpass\n", - "\n", - "os.environ[\"SIMVUE_URL\"] = \"https://app.simvue.io\"\n", - "os.environ[\"SIMVUE_TOKEN\"] = getpass.getpass(prompt=\"Token: \")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "h3m7b9siaHqG" - }, - "source": [ - "## Install dependencies" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "6iixvs2NWeso" - }, - "outputs": [], - "source": [ - "!pip install tensorflow simvue" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-MKgsIcIaL4R" - }, - "source": [ - "##\u00a0The code" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "SLDZTHMgv5P8" - }, - "outputs": [], - "source": [ - "from __future__ import absolute_import, division, print_function\n", - "\n", - "import os\n", - "import getpass\n", - "import tensorflow as tf\n", - "from tensorflow.keras import Model, layers\n", - "import numpy as np\n", - "import random\n", - "from simvue import Run" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "jz0hbM15wB5D" - }, - "outputs": [], - "source": [ - "# Dataset parameters.\n", - "num_classes = 2 # linear sequence or not.\n", - "seq_max_len = 20 # Maximum sequence length.\n", - "seq_min_len = 5 # Minimum sequence length (before padding).\n", - "masking_val = (\n", - " -1\n", - ") # -1 will represents the mask and be used to pad sequences to a common max length.\n", - "max_value = 10000 # Maximum int value.\n", - "\n", - "# Training Parameters\n", - "learning_rate = 0.001\n", - "training_steps = 2000\n", - "batch_size = 64\n", - "display_step = 10\n", - "\n", - "# Network Parameters\n", - 
"num_units = 32 # number of neurons for the LSTM layer." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "9-QpNOGY768t" - }, - "outputs": [], - "source": [ - "# Start a run and specify metadata\n", - "run = Run()\n", - "run.init(\n", - " metadata={\n", - " \"dataset.num_classes\": num_classes,\n", - " \"dataset.seq_max_len\": seq_max_len,\n", - " \"dataset.seq_min_len\": seq_min_len,\n", - " \"dataset.masking_val\": masking_val,\n", - " \"dataset.max_value\": max_value,\n", - " \"training.learning_rate\": learning_rate,\n", - " \"training.training_steps\": training_steps,\n", - " \"training.batch_size\": batch_size,\n", - " \"network.num_units\": num_units,\n", - " }\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "5o-wjSjYwFtU" - }, - "outputs": [], - "source": [ - "# ====================\n", - "# TOY DATA GENERATOR\n", - "# ====================\n", - "\n", - "\n", - "def toy_sequence_data():\n", - " \"\"\"Generate sequence of data with dynamic length.\n", - " This function generates toy samples for training:\n", - " - Class 0: linear sequences (i.e. [1, 2, 3, 4, ...])\n", - " - Class 1: random sequences (i.e. [9, 3, 10, 7,...])\n", - "\n", - " NOTICE:\n", - " We have to pad each sequence to reach 'seq_max_len' for TensorFlow\n", - " consistency (we cannot feed a numpy array with inconsistent\n", - " dimensions). 
The dynamic calculation will then be perform and ignore\n", - " the masked value (here -1).\n", - " \"\"\"\n", - " while True:\n", - " # Set variable sequence length.\n", - " seq_len = random.randint(seq_min_len, seq_max_len)\n", - " rand_start = random.randint(0, max_value - seq_len)\n", - " # Add a random or linear int sequence (50% prob).\n", - " if random.random() < 0.5:\n", - " # Generate a linear sequence.\n", - " seq = np.arange(start=rand_start, stop=rand_start + seq_len)\n", - " # Rescale values to [0., 1.].\n", - " seq = seq / max_value\n", - " # Pad sequence until the maximum length for dimension consistency.\n", - " # Masking value: -1.\n", - " seq = np.pad(\n", - " seq,\n", - " mode=\"constant\",\n", - " pad_width=(0, seq_max_len - seq_len),\n", - " constant_values=masking_val,\n", - " )\n", - " label = 0\n", - " else:\n", - " # Generate a random sequence.\n", - " seq = np.random.randint(max_value, size=seq_len)\n", - " # Rescale values to [0., 1.].\n", - " seq = seq / max_value\n", - " # Pad sequence until the maximum length for dimension consistency.\n", - " # Masking value: -1.\n", - " seq = np.pad(\n", - " seq,\n", - " mode=\"constant\",\n", - " pad_width=(0, seq_max_len - seq_len),\n", - " constant_values=masking_val,\n", - " )\n", - " label = 1\n", - " yield np.array(seq, dtype=np.float32), np.array(label, dtype=np.float32)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "xGRPh018wI9n" - }, - "outputs": [], - "source": [ - "# Use tf.data API to shuffle and batch data.\n", - "train_data = tf.data.Dataset.from_generator(\n", - " toy_sequence_data, output_types=(tf.float32, tf.float32)\n", - ")\n", - "train_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "hA1660iBwLK0" - }, - "outputs": [], - "source": [ - "# Create LSTM Model.\n", - "class LSTM(Model):\n", - " # Set layers.\n", - " def 
__init__(self):\n", - " super(LSTM, self).__init__()\n", - " # Define a Masking Layer with -1 as mask.\n", - " self.masking = layers.Masking(mask_value=masking_val)\n", - " # Define a LSTM layer to be applied over the Masking layer.\n", - " # Dynamic computation will automatically be performed to ignore -1 values.\n", - " self.lstm = layers.LSTM(units=num_units)\n", - " # Output fully connected layer (2 classes: linear or random seq).\n", - " self.out = layers.Dense(num_classes)\n", - "\n", - " # Set forward pass.\n", - " def call(self, x, is_training=False):\n", - " # A RNN Layer expects a 3-dim input (batch_size, seq_len, num_features).\n", - " x = tf.reshape(x, shape=[-1, seq_max_len, 1])\n", - " # Apply Masking layer.\n", - " x = self.masking(x)\n", - " # Apply LSTM layer.\n", - " x = self.lstm(x)\n", - " # Apply output layer.\n", - " x = self.out(x)\n", - " if not is_training:\n", - " # tf cross entropy expect logits without softmax, so only\n", - " # apply softmax when not training.\n", - " x = tf.nn.softmax(x)\n", - " return x\n", - "\n", - "\n", - "# Build LSTM model.\n", - "lstm_net = LSTM()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "HhVoI5D-wNAu" - }, - "outputs": [], - "source": [ - "# Cross-Entropy Loss.\n", - "# Note that this will apply 'softmax' to the logits.\n", - "def cross_entropy_loss(x, y):\n", - " # Convert labels to int 64 for tf cross-entropy function.\n", - " y = tf.cast(y, tf.int64)\n", - " # Apply softmax to logits and compute cross-entropy.\n", - " loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=x)\n", - " # Average loss across the batch.\n", - " return tf.reduce_mean(loss)\n", - "\n", - "\n", - "# Accuracy metric.\n", - "def accuracy(y_pred, y_true):\n", - " # Predicted class is the index of highest score in prediction vector (i.e. 
argmax).\n", - " correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n", - " return tf.reduce_mean(tf.cast(correct_prediction, tf.float32), axis=-1)\n", - "\n", - "\n", - "# Adam optimizer.\n", - "optimizer = tf.optimizers.Adam(learning_rate)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "1kkJ-nvswTux" - }, - "outputs": [], - "source": [ - "# Optimization process.\n", - "def run_optimization(x, y):\n", - " # Wrap computation inside a GradientTape for automatic differentiation.\n", - " with tf.GradientTape() as g:\n", - " # Forward pass.\n", - " pred = lstm_net(x, is_training=True)\n", - " # Compute loss.\n", - " loss = cross_entropy_loss(pred, y)\n", - "\n", - " # Variables to update, i.e. trainable variables.\n", - " trainable_variables = lstm_net.trainable_variables\n", - "\n", - " # Compute gradients.\n", - " gradients = g.gradient(loss, trainable_variables)\n", - "\n", - " # Update weights following gradients.\n", - " optimizer.apply_gradients(zip(gradients, trainable_variables))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "3Ot5AWLKwVdH" - }, - "outputs": [], - "source": [ - "# Run training for the given number of steps.\n", - "for step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1):\n", - " # Run the optimization to update W and b values.\n", - " run_optimization(batch_x, batch_y)\n", - "\n", - " if step % display_step == 0 or step == 1:\n", - " pred = lstm_net(batch_x, is_training=True)\n", - " loss = cross_entropy_loss(pred, batch_y)\n", - " acc = accuracy(pred, batch_y)\n", - " print(\"step: %i, loss: %f, accuracy: %f\" % (step, loss, acc))\n", - "\n", - " # Log metrics to Simvue\n", - " run.log_metrics({\"loss\": float(loss), \"accuracy\": float(acc)}, step=step)\n", - "\n", - "# End the run\n", - "run.close()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "5keqr7gsaTPa" - }, - "source": [ - "Once the 
training above has started you can login to https://app.simvue.io, go to the **Runs** page and you should be able to find the currently running training." - ] - } - ], - "metadata": { - "colab": { - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - }, - "language_info": { - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/notebooks/temperatures.sh b/notebooks/temperatures.sh new file mode 100644 index 00000000..3e31af13 --- /dev/null +++ b/notebooks/temperatures.sh @@ -0,0 +1,41 @@ +#!/bin/bash +""" +This file is used to create dummy temperature data for the Non-Python Example. +""" +# Output file +script_dir=$(dirname "$0") +output_file="$script_dir/temperatures.csv" + +# Parameters +initial_temp=20 +linear_rate=5 +decay_rate=0.05 +num_linear_steps=10 +num_decay_steps=20 +time_per_step=5 + +# Create or clear the output file +echo "Step,Time,Temperature" > $output_file + +# Generate linear increase +echo "Sample is being heated" + +for ((i=0; i> $output_file + sleep $time_per_step +done + +# Generate exponential decay +echo "Sample is cooling" + +for ((i=0; i<=num_decay_steps; i++)); do + step=$((num_linear_steps + i)) + time=$((step * time_per_step)) + temp=$(awk "BEGIN {print $initial_temp + $num_linear_steps * $linear_rate * exp(-$decay_rate * $i)}") + echo "$step,$time,$temp" >> $output_file + sleep $time_per_step +done + +echo "Temperature data has been written to $output_file" diff --git a/pyproject.toml b/pyproject.toml index 7b03d9e5..f2bfee7d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "simvue" -version = "2.0.0a3" +version = "2.0.0rc1" description = "Simulation tracking and monitoring" authors = [ {name = "Simvue Development Team", email = "info@simvue.io"} diff --git a/simvue/api/objects/artifact/file.py b/simvue/api/objects/artifact/file.py index 54f0a8cb..7b2ce312 100644 --- a/simvue/api/objects/artifact/file.py +++ 
b/simvue/api/objects/artifact/file.py @@ -87,7 +87,7 @@ def new( if offline: return _artifact - with open(file_path, "rb") as out_f: + with open(_file_orig_path, "rb") as out_f: _artifact._upload(file=out_f) return _artifact diff --git a/simvue/api/objects/base.py b/simvue/api/objects/base.py index 84de6e1e..f3b607ef 100644 --- a/simvue/api/objects/base.py +++ b/simvue/api/objects/base.py @@ -37,6 +37,9 @@ except ImportError: from typing_extensions import Self +# Need to use this inside of Generator typing to fix bug present in Python 3.10 - see issue #745 +T = typing.TypeVar("T", bound="SimvueObject") + def staging_check(member_func: typing.Callable) -> typing.Callable: """Decorator for checking if requested attribute has uncommitted changes""" @@ -318,7 +321,7 @@ def get( count: pydantic.PositiveInt | None = None, offset: pydantic.NonNegativeInt | None = None, **kwargs, - ) -> typing.Generator[tuple[str, Self | None], None, None]: + ) -> typing.Generator[tuple[str, T | None], None, None]: _class_instance = cls(_read_only=True, _local=True) if (_data := cls._get_all_objects(count, offset, **kwargs).get("data")) is None: raise RuntimeError( diff --git a/simvue/eco.py b/simvue/eco.py index 50db508f..76e2d694 100644 --- a/simvue/eco.py +++ b/simvue/eco.py @@ -35,11 +35,11 @@ def out( try: self._simvue_run.update_metadata( { - "codecarbon": { + "sustainability": { "country": total.country_name, "country_iso_code": total.country_iso_code, "region": total.region, - "version": total.codecarbon_version, + "codecarbon_version": total.codecarbon_version, } } ) @@ -57,10 +57,10 @@ def out( try: self._simvue_run.log_metrics( metrics={ - "codecarbon.total.emissions": total.emissions, - "codecarbon.total.energy_consumed": total.energy_consumed, - "codecarbon.delta.emissions": delta.emissions, - "codecarbon.delta.energy_consumed": delta.energy_consumed, + "sustainability.emissions.total": total.emissions, + "sustainability.energy_consumed.total": total.energy_consumed, + 
"sustainability.emissions.delta": delta.emissions, + "sustainability.energy_consumed.delta": delta.energy_consumed, }, step=self._metrics_step, timestamp=simvue_timestamp(_cc_timestamp), diff --git a/tests/functional/test_run_class.py b/tests/functional/test_run_class.py index e4f6f55a..2e9a8b1a 100644 --- a/tests/functional/test_run_class.py +++ b/tests/functional/test_run_class.py @@ -58,8 +58,8 @@ def test_run_with_emissions() -> None: _metric_names = [item[0] for item in _run.metrics] client = sv_cl.Client() for _metric in ["emissions", "energy_consumed"]: - _total_metric_name = f'codecarbon.total.{_metric}' - _delta_metric_name = f'codecarbon.delta.{_metric}' + _total_metric_name = f'sustainability.{_metric}.total' + _delta_metric_name = f'sustainability.{_metric}.delta' assert _total_metric_name in _metric_names assert _delta_metric_name in _metric_names _metric_values = client.get_metric_values(metric_names=[_total_metric_name, _delta_metric_name], xaxis="time", output_format="dataframe", run_ids=[run_created.id])