diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000..6ad5586 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,3 @@ +sphinx-copybutton +sphinx_github_style +sphinx_rtd_theme \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py index e52fe01..445e977 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -5,23 +5,277 @@ # -- Project information ----------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information +# -- Path setup -------------------------------------------------------------- -project = 'adept' -copyright = '2023, Archis Joglekar' -author = 'Archis Joglekar' +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) -# -- General configuration --------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration -extensions = [] +# -- Project information ----------------------------------------------------- + +import inspect +import os +import subprocess +import sys +from pathlib import Path -templates_path = ['_templates'] -exclude_patterns = [] +sys.path.insert(0, os.path.abspath(".")) +sys.path.append(os.path.abspath("../../")) +import adept +project = "ADEPT" +copyright = "2024, Archis Joglekar" +author = "Archis Joglekar" # -- Options for HTML output ------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output -html_theme = 'alabaster' -html_static_path = ['_static'] +html_theme = "sphinx_rtd_theme" +html_static_path = ["_static"] + + +# -- General configuration 
--------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.coverage", + "sphinx.ext.doctest", + "sphinx.ext.githubpages", + "sphinx.ext.intersphinx", + "sphinx.ext.mathjax", + "sphinx.ext.napoleon", + "sphinx_copybutton", + "sphinx_github_style", +] +# options for sphinx_github_style +top_level = "adept" +linkcode_blob = "head" +linkcode_url = r"https://github.com/ergodicio/adept/" +linkcode_link_text = "Source" + + +def linkcode_resolve(domain, info): + """Returns a link to the source code on GitHub, with appropriate lines highlighted""" + + if domain != "py" or not info["module"]: + return None + + modname = info["module"] + fullname = info["fullname"] + + submod = sys.modules.get(modname) + if submod is None: + return None + + obj = submod + for part in fullname.split("."): + try: + obj = getattr(obj, part) + except AttributeError: + return None + + # for jitted stuff, get the original src + if hasattr(obj, "__wrapped__"): + obj = obj.__wrapped__ + + # get the link to HEAD + cmd = "git log -n1 --pretty=%H" + try: + # get most recent commit hash + head = subprocess.check_output(cmd.split()).strip().decode("utf-8") + + # if head is a tag, use tag as reference + cmd = "git describe --exact-match --tags " + head + try: + tag = subprocess.check_output(cmd.split(" ")).strip().decode("utf-8") + blob = tag + + except subprocess.CalledProcessError: + blob = head + + except subprocess.CalledProcessError: + print("Failed to get head") # so no head? 
+ blob = "main" + + linkcode_url = r"https://github.com/ergodicio/adept/" + linkcode_url = linkcode_url.strip("/") + f"/blob/{blob}/" + linkcode_url += "{filepath}#L{linestart}-L{linestop}" + + # get a Path object representing the working directory of the repository. + try: + cmd = "git rev-parse --show-toplevel" + repo_dir = Path(subprocess.check_output(cmd.split(" ")).strip().decode("utf-8")) + + except subprocess.CalledProcessError as e: + raise RuntimeError("Unable to determine the repository directory") from e + + # For ReadTheDocs, repo is cloned to /path/to//checkouts// + if repo_dir.parent.stem == "checkouts": + repo_dir = repo_dir.parent.parent + + # path to source file + try: + filepath = os.path.relpath(inspect.getsourcefile(obj), repo_dir) + if filepath is None: + return + except Exception: + return None + + # lines in source file + try: + source, lineno = inspect.getsourcelines(obj) + except OSError: + return None + else: + linestart, linestop = lineno, lineno + len(source) - 1 + + # Fix links with "../../../" or "..\\..\\..\\" + filepath = "/".join(filepath[filepath.find(top_level) :].split("\\")) + + final_link = linkcode_url.format(filepath=filepath, linestart=linestart, linestop=linestop) + print(f"Final Link for {fullname}: {final_link}") + return final_link + + +# numpydoc_class_members_toctree = False +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = False +napoleon_include_init_with_doc = False +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = False + +autodoc_default_options = { + "member-order": "bysource", + "special-members": "__call__", + "exclude-members": "__init__", +} +# Add any paths that contain templates here, relative to this 
directory. +templates_path = ["_templates"] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +source_suffix = [".rst", ".md"] +# source_suffix = { +# '.rst': 'restructuredtext', +# '.md': 'markdown', +# } +# The master toctree document. +master_doc = "index" + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = "en" + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "README.rst"] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +# html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. + +html_theme_options = { + # 'canonical_url': '', + # 'analytics_id': 'UA-XXXXXXX-1', # Provided by Google in your dashboard + "logo_only": True, + "display_version": True, + "prev_next_buttons_location": "both", + "style_external_links": False, + "style_nav_header_background": "#3c4142", + # Toc options + "collapse_navigation": True, + "sticky_navigation": True, + "navigation_depth": 2, + "includehidden": True, + "titles_only": False, +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". 
+html_static_path = ["_static"] +html_css_files = ["custom.css"] + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = "_static/images/logo_small_clear.png" + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = "_static/images/desc_icon.ico" + + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +html_last_updated_fmt = "%b %d, %Y" + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +html_domain_indices = True + +# If false, no index is generated. +html_use_index = True + +# If true, the index is split into individual pages for each letter. +html_split_index = False + +# If true, links to the reST sources are added to the pages. +html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Output file base name for HTML help builder. 
+htmlhelp_basename = "adept" diff --git a/docs/source/custom_sims.rst b/docs/source/custom_sims.rst new file mode 100644 index 0000000..7dda7f1 --- /dev/null +++ b/docs/source/custom_sims.rst @@ -0,0 +1,17 @@ +Running Custom Simulations +----------------------------- + +The following sections provide more detailed information on how to customize `adept` simulations + +note: The following sections are intended for users who are already familiar with the basic usage of +ADEPT. If you are new to ADEPT, please run a few examples from the example configs to become familiar. + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + usage/initialization + usage/outputs + usage/analysis + usage/visualization + \ No newline at end of file diff --git a/docs/source/density.rst b/docs/source/density.rst new file mode 100644 index 0000000..e69de29 diff --git a/docs/source/dev_guide.rst b/docs/source/dev_guide.rst new file mode 100644 index 0000000..c10d396 --- /dev/null +++ b/docs/source/dev_guide.rst @@ -0,0 +1,109 @@ +Developer Guide +--------------- + +In case you are interested in looking past the forward simulation use case, that is, if you are interested in running a program which is not just + +.. code-block:: bash + + python3 run.py --cfg config// + +This runs a forward simulation with the specified input parameters. It calls functions within `utils/runner.py` for this. +The most important one to understand is the ``_run_`` function. Here is a stripped down pseudo-code version + +.. code-block:: python + + def run(cfg: Dict) -> Tuple[Solution, Dict]: + """ + This function is the main entry point for running a simulation. It takes a configuration dictionary and returns a + ``diffrax.Solution`` object and a dictionary of datasets. + + Args: + cfg: A dictionary containing the configuration for the simulation. + + Returns: + A tuple of a Solution object and a dictionary of ``xarray.dataset``s. 
+ + """ + t__ = time.time() # starts the timer + + helpers = get_helpers(cfg["mode"]) # gets the right helper functions depending on the desired simulation + + with tempfile.TemporaryDirectory() as td: # creates a temporary directory to store the simulation data + with open(os.path.join(td, "config.yaml"), "w") as fi: # writes the configuration to the temporary directory + yaml.dump(cfg, fi) + + # NB - this is not yet solver specific but should be + cfg = write_units(cfg, td) # writes the units to the temporary directory + + # NB - this is solver specific + cfg = helpers.get_derived_quantities(cfg) # gets the derived quantities from the configuration + misc.log_params(cfg) # logs the parameters to mlflow + + # NB - this is solver specific + cfg["grid"] = helpers.get_solver_quantities(cfg) # gets the solver quantities from the configuration + cfg = helpers.get_save_quantities(cfg) # gets the save quantities from the configuration + + # create the dictionary of time quantities that is given to the time integrator and save manager + tqs = { + "t0": cfg["grid"]["tmin"], + "t1": cfg["grid"]["tmax"], + "max_steps": cfg["grid"]["max_steps"], + "save_t0": cfg["grid"]["tmin"], + "save_t1": cfg["grid"]["tmax"], + "save_nt": cfg["grid"]["tmax"], + } + + # in case you are using ML models + models = helpers.get_models(cfg["models"]) if "models" in cfg else None + + # initialize the state for the solver - NB - this is solver specific + state = helpers.init_state(cfg) + + # NB - this is solver specific + # Remember that we rely on the diffrax library to provide the ODE (time, usually) integrator + # So we need to create the diffrax terms, solver, and save objects + diffeqsolve_quants = helpers.get_diffeqsolve_quants(cfg) + + # run + t0 = time.time() + + @eqx.filter_jit + def _run_(these_models, time_quantities: Dict): + args = {"drivers": cfg["drivers"]} + if these_models is not None: + args["models"] = these_models + if "terms" in cfg.keys(): + args["terms"] = cfg["terms"] + + 
return diffeqsolve( + terms=diffeqsolve_quants["terms"], + solver=diffeqsolve_quants["solver"], + t0=time_quantities["t0"], + t1=time_quantities["t1"], + max_steps=time_quantities["max_steps"], + dt0=cfg["grid"]["dt"], + y0=state, + args=args, + saveat=SaveAt(**diffeqsolve_quants["saveat"]), + ) + + result = _run_(models, tqs) + mlflow.log_metrics({"run_time": round(time.time() - t0, 4)}) # logs the run time to mlflow + + t0 = time.time() + # NB - this is solver specific + datasets = helpers.post_process(result, cfg, td) # post-processes the result + mlflow.log_metrics({"postprocess_time": round(time.time() - t0, 4)}) # logs the post-process time to mlflow + mlflow.log_artifacts(td) # logs the temporary directory to mlflow + + mlflow.log_metrics({"total_time": round(time.time() - t__, 4)}) # logs the total time to mlflow + + # fin + return result, datasets + + +Here, we are heavily relying on two open-source libraries. + +1. **MLFlow** as an experiment manager to log parameters, metrics, and artifacts + +2. **Diffrax** to solve the ODEs diff --git a/docs/source/driver.rst b/docs/source/driver.rst new file mode 100644 index 0000000..7a76cae --- /dev/null +++ b/docs/source/driver.rst @@ -0,0 +1,6 @@ +# Ponderomotive driver + +The ponderomotive driver provides an external electric field that can drive waves in the plasma. + +It has a spatiotemporal profile that can be customized. Like all the other spatiotemporal profiles in this code, +it is parameterized via 5 parameters diff --git a/docs/source/faq.rst b/docs/source/faq.rst new file mode 100644 index 0000000..7bf6e10 --- /dev/null +++ b/docs/source/faq.rst @@ -0,0 +1,29 @@ +FAQ +----- + +**Q. Why use MLflow?** + +A. MLFlow handles all the incredibly rote metadata management that computational scientists have historically either just +completely ignored, written in an excel file, used a lab notebook, etc (You may always be an exception!). 
+ +You can store parameters (inputs to the simulation, box size, driver parameters, etc.), metrics (run time, total electrostatic energy, temperature at t=200 ps etc.) +and artifacts (the fields, distribution functions, plots, post processed quantities, configuration etc.) in a single place. + +This place can either be your local machine, or better yet, a remote server that is backed by a database and an object store. + +--------------------- + +**Q. Why use xarray?** + +A. Xarray is a great way to handle gridded data. It is performant, has a stable API, has high level plotting features. It is fairly portable, maybe not as much as HDF5, but it is a netCDF4 file so +it can't be that bad! + +--------------------- + +**Q. Why use diffrax?** + +A. Diffrax provides the ODE integrator capabilities. However, you can, and we often do, side-step the actual time-integrator but only use diffrax for yet again, a stable API that enables us to +save data in a consistent way. Diffrax lets us pass functions to the integrator, which is a great way to store custom post-processed (e.g. interpolated) quantities. You can also handle the result +in the same consistent way. Yes, we could have just designed an API. But diffrax DOES also provide the time integrator. + +Another thing diffrax does is it has a great loop handling system that compiles much faster than anything I have written. I don't know why that is, but it is. diff --git a/docs/source/index.rst b/docs/source/index.rst index 14cf652..c9c362a 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -3,7 +3,7 @@ You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. -Welcome to adept's documentation! +ADEPT ================================= **ADEPT** is a set of **A** utomatic **D** ifferentation **E** nabled **P** lasma **T** ransport codes. @@ -12,13 +12,15 @@ In some form or the other, they solve the equations of motion for a plasma. 
So far, we have implemented -1. Two fluid - Poisson system in 1D -2. Vlasov-Poisson system in 2D +1. Vlasov-Poisson-Fokker-Planck 1D1V +2. Two fluid - Poisson system in 1D +3. Vlasov-Poisson-Fokker-Planck 1D2V +4. Vlasov-Poisson system in 2D What is novel about it? ------------------------ -- Automatic Differentiation (AD) Enabled (bc of JAX, Diffrax etc.) -- GPU-capable (bc of JAX, XLA) +- Automatic Differentiation (AD) Enabled (bc of JAX) +- GPU-capable (bc of XLA) - Experiment manager enabled (bc of mlflow) - Pythonic @@ -46,18 +48,28 @@ A couple of implemented examples are See ref. [2] for details and an application -What does an experiment manager do for us? -------------------------------------------------- -An experiment manager, namely `mlflow` here, simplifies management of simulation configurations and artifacts. -We run `mlflow` on the cloud so we have a central, web-accessible store for all simulation objects. This saves all the -data and simulation management effort and just let `mlflow` manage everything. To see for yourself, -just run a simulation and type `mlflow ui` into your terminal and see what happens :) -Run custom simulations ------------------------- -Take one of the `config`s in the `/configs` directory and modify it as you wish. Then use `run.py` to run the simulation -You will find the results using the mlflow ui. You can find the binary run information will be stored using mlflow as well. +Documentation +------------------ + +.. toctree:: + usage + custom_sims + faq + dev_guide + api_documentation + + :maxdepth: 2 + :caption: Contents: + + +.. note:: + + This project is under active development. + + Contributing guide ------------------------ The contributing guide is in development but for now, just make an issue / pull request and we can go from there :) @@ -75,22 +87,10 @@ Citation [1] A. S. Joglekar and A. G. R. Thomas, “Machine learning of hidden variables in multiscale fluid simulation,” Mach. Learn.: Sci. Technol., vol. 4, no. 
3, p. 035049, Sep. 2023, doi: 10.1088/2632-2153/acf81a. -.. toctree:: - usage - tests - tf1d - :maxdepth: 2 - :caption: Contents: - - - -.. note:: - - This project is under active development. -Indices and tables -================== +.. Indices and tables +.. ================== -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` +.. * :ref:`genindex` +.. * :ref:`modindex` +.. * :ref:`search` diff --git a/docs/source/tests.rst b/docs/source/tests.rst index d9d235a..a642bd3 100644 --- a/docs/source/tests.rst +++ b/docs/source/tests.rst @@ -1,7 +1,32 @@ Tests ============= + +Run tests and examples +------------------------------ +First install pytest via + +.. code-block:: console + + (venv) $ pip install pytest + (venv) $ pytest + +.. code-block:: console + + (adept) $ pip install pytest + (adept) $ pytest + +This will run all the tests, which will likely include relatively expensive 2D2V Vlasov simulations. +If you only want to see example usage, you can choose particular tests by using the `-k` flag. + + The package is tested against +1D1V Vlasov implementation +-------------------------------- +- `test_landau_damping.py` - recover the real part and imaginary part (Landau damping) of the resonance according to the kinetic dispersion relation +- `test_absorbing_wave.py` - make sure the absorbing boundary conditions for the wave solver for the vector potential work correctly + + 1D two-fluid implementation -------------------------------- @@ -19,10 +44,3 @@ The package is tested against - `test_landau_damping.py` - recover the Landau damping rate according to the kinetic dispersion relation using a phenomenological term -To run the tests ------------------- -.. 
code-block:: console - - (venv) $ pip install pytest - (venv) $ pytest - (venv) $ pytest tests/ \ No newline at end of file diff --git a/docs/source/usage.rst b/docs/source/usage.rst index 6c8d902..ae7de0c 100644 --- a/docs/source/usage.rst +++ b/docs/source/usage.rst @@ -20,21 +20,20 @@ or using conda: $ mamba activate adept (adept) $ +-------------- -Run tests and examples ----------- -First install pytest via -.. code-block:: console - - (venv) $ pip install pytest - (venv) $ pytest +Run an example +-------------- -.. code-block:: console +The most common and obvious use case for ADEPT is a simple forward simulation that can be run from the command line. For example, to run a 1D1V Vlasov simulation of a driven electron plasma wave, use the following command: - (adept) $ pip install pytest - (adept) $ pytest +.. code-block:: bash + + (venv) $ python3 run.py --config configs/vlasov-1d/epw -This will run all the tests, which will likely include relatively expensive 2D2V Vlasov simulations. -If you only want to see example usage, you can choose particular tests by using the `-k` flag. +The input parameters are provided in `configs/vlasov-1d/epw.yaml`. The output will be saved and made accessible via MLFlow. To access it, +1. Launch an mlflow server via running ``mlflow ui`` from the command line +2. Open a web browser and navigate to http://localhost:5000 +3. Click on the experiment name to see the results diff --git a/docs/source/usage/initialization.rst b/docs/source/usage/initialization.rst new file mode 100644 index 0000000..643a572 --- /dev/null +++ b/docs/source/usage/initialization.rst @@ -0,0 +1,39 @@ +Initialization Details +----------------------- + +This is different per simulation type but there are some consistent concepts. These are that the density can be defined +as a function of space, the driver (antenna or ponderomotive) can be a function of time and space, and the collision frequency +can be a function of time and space. 
+ +That is, you can specify, + +.. math:: + n(x), E(t, x), \nu(t, x) + +Each of these profiles is provided using a tanh function that is parameterized using + +``p_{wL}``, ``p_{wR}`` - the rise and fall of the flat-top + +``p_w`` - width of the flat-top + +``p_c`` - center of the flat-top + +``p_L = p_c - p_w / 2`` - left edge of the flat-top + +``p_R = p_c + p_w / 2`` - right edge of the flat-top + +where p can be the time or space coordinate depending on the context + +Then, the overall shape is given by + +.. math:: + f(p) = 0.5 * \tanh((ax - p_L) / p_{wL}) - \tanh((ax - p_R) / p_{wR}) + +where ``ax`` is the time or space axis + +If you plot this, it looks like a flat top centered at ``p_c`` and has a width ``p_w``, and a rise and fall of ``p_{wL}`` and ``p_{wR}`` respectively. + + + + + diff --git a/docs/source/vlasov1d.rst b/docs/source/vlasov1d.rst new file mode 100644 index 0000000..d06ee00 --- /dev/null +++ b/docs/source/vlasov1d.rst @@ -0,0 +1,17 @@ +Vlasov-Poisson 1D1V +======================= + +To run the code for Vlasov-Poisson 1D1V, use the configs in `configs/vlasov1d/`. +There is a `mode` option in the config file which tells `ADEPT` which solver to use. + +----------------------- +### Relevant features + +1. Initialization - You can initialize the distribution function using a uniform or non-uniform density profile +2. Ponderomotive driver - You will need a driver to drive up a wave + +Typical simulations +1. Single mode plasma waves (Landau damping, trapping) +2. Finite length plasma waves (Wavepackets) +3. Density gradients +4. Stimulated Raman Scattering diff --git a/utils/runner.py b/utils/runner.py index 68c7d92..38c2ae5 100644 --- a/utils/runner.py +++ b/utils/runner.py @@ -95,23 +95,37 @@ def run_job(run_id, nested): def run(cfg: Dict) -> Tuple[Solution, Dict]: - t__ = time.time() + """ + This function is the main entry point for running a simulation. 
It takes a configuration dictionary and returns a + ``diffrax.Solution`` object and a dictionary of datasets. - helpers = get_helpers(cfg["mode"]) + Args: + cfg: A dictionary containing the configuration for the simulation. - with tempfile.TemporaryDirectory() as td: - with open(os.path.join(td, "config.yaml"), "w") as fi: + Returns: + A tuple of a Solution object and a dictionary of ``xarray.dataset``s. + + """ + t__ = time.time() # starts the timer + + helpers = get_helpers(cfg["mode"]) # gets the right helper functions depending on the desired simulation + + with tempfile.TemporaryDirectory() as td: # creates a temporary directory to store the simulation data + with open(os.path.join(td, "config.yaml"), "w") as fi: # writes the configuration to the temporary directory yaml.dump(cfg, fi) - cfg = write_units(cfg, td) + # NB - this is not yet solver specific but should be + cfg = write_units(cfg, td) # writes the units to the temporary directory - # get derived quantities - cfg = helpers.get_derived_quantities(cfg) - misc.log_params(cfg) + # NB - this is solver specific + cfg = helpers.get_derived_quantities(cfg) # gets the derived quantities from the configuration + misc.log_params(cfg) # logs the parameters to mlflow - cfg["grid"] = helpers.get_solver_quantities(cfg) - cfg = helpers.get_save_quantities(cfg) + # NB - this is solver specific + cfg["grid"] = helpers.get_solver_quantities(cfg) # gets the solver quantities from the configuration + cfg = helpers.get_save_quantities(cfg) # gets the save quantities from the configuration + # create the dictionary of time quantities that is given to the time integrator and save manager tqs = { "t0": cfg["grid"]["tmin"], "t1": cfg["grid"]["tmax"], @@ -121,14 +135,20 @@ def run(cfg: Dict) -> Tuple[Solution, Dict]: "save_nt": cfg["grid"]["tmax"], } + # in case you are using ML models models = helpers.get_models(cfg["models"]) if "models" in cfg else None + + # initialize the state for the solver - NB - this is solver 
specific state = helpers.init_state(cfg) + # NB - this is solver specific + # Remember that we rely on the diffrax library to provide the ODE (time, usually) integrator + # So we need to create the diffrax terms, solver, and save objects + diffeqsolve_quants = helpers.get_diffeqsolve_quants(cfg) + # run t0 = time.time() - diffeqsolve_quants = helpers.get_diffeqsolve_quants(cfg) - @eqx.filter_jit def _run_(these_models, time_quantities: Dict): args = {"drivers": cfg["drivers"]} @@ -150,15 +170,15 @@ def _run_(these_models, time_quantities: Dict): ) result = _run_(models, tqs) - mlflow.log_metrics({"run_time": round(time.time() - t0, 4)}) + mlflow.log_metrics({"run_time": round(time.time() - t0, 4)}) # logs the run time to mlflow t0 = time.time() - datasets = helpers.post_process(result, cfg, td) - mlflow.log_metrics({"postprocess_time": round(time.time() - t0, 4)}) - mlflow.log_artifacts(td) + # NB - this is solver specific + datasets = helpers.post_process(result, cfg, td) # post-processes the result + mlflow.log_metrics({"postprocess_time": round(time.time() - t0, 4)}) # logs the post-process time to mlflow + mlflow.log_artifacts(td) # logs the temporary directory to mlflow - mlflow.log_metrics({"total_time": round(time.time() - t__, 4)}) + mlflow.log_metrics({"total_time": round(time.time() - t__, 4)}) # logs the total time to mlflow # fin - return result, datasets