diff --git a/.github/dependabot.yml b/.github/dependabot.yml index e9767227a1..204e5592b6 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,5 +1,14 @@ version: 2 updates: +- package-ecosystem: pip + directory: /requirements + schedule: + interval: monthly + groups: + actions: + patterns: + - 'typecheck.txt' + - package-ecosystem: github-actions directory: /.github/workflows schedule: diff --git a/.github/workflows/bleeding-edge.yaml b/.github/workflows/bleeding-edge.yaml index 0807341e48..3567465440 100644 --- a/.github/workflows/bleeding-edge.yaml +++ b/.github/workflows/bleeding-edge.yaml @@ -61,8 +61,7 @@ jobs: # are not installed by pip as specified from pyproject.toml, hence we get # to use the dev version of numpy at build time. run: | - python setup.py build_ext -q -j2 - python -m pip install -e .[test] --no-build-isolation + python -m pip -v install -e .[test] --no-build-isolation - run: python -m pip list diff --git a/.github/workflows/type-checking.yaml b/.github/workflows/type-checking.yaml index 6931f4a45e..83713c2bc0 100644 --- a/.github/workflows/type-checking.yaml +++ b/.github/workflows/type-checking.yaml @@ -7,7 +7,8 @@ on: pull_request: paths: - yt/**/*.py - - setup.cfg + - pyproject.toml + - requirements/typecheck.txt - .github/workflows/type-checking.yaml workflow_dispatch: @@ -37,7 +38,7 @@ jobs: - name: Build run: | python3 -m pip install --upgrade pip - python3 -m pip install -e .[typecheck] + python3 -m pip install -e . -r requirements/typecheck.txt - run: python -m pip list diff --git a/.github/workflows/wheels.yaml b/.github/workflows/wheels.yaml index 41340b78b8..cd63fbc161 100644 --- a/.github/workflows/wheels.yaml +++ b/.github/workflows/wheels.yaml @@ -13,6 +13,7 @@ on: - MANIFEST.in workflow_dispatch: + jobs: build_wheels: name: Build wheels on ${{ matrix.os }} @@ -21,7 +22,7 @@ jobs: matrix: os: [ ubuntu-20.04, - windows-2019, + windows-2022, macos-13, # x86_64 macos-14, # arm64 ] @@ -32,18 +33,9 @@ jobs: uses: actions/checkout@v4 - name: Build wheels for CPython - uses: pypa/cibuildwheel@v2.17.0 + uses: pypa/cibuildwheel@v2.21.1 with: output-dir: dist - env: - CIBW_BUILD: "cp39-* cp310-* cp311-* cp312-*" - CIBW_SKIP: "*-musllinux_*" # numpy doesn't have wheels for musllinux so we can't build some quickly and without bloating - CIBW_ARCHS_LINUX: "x86_64" - CIBW_ARCHS_MACOS: auto - MACOSX_DEPLOYMENT_TARGET: "10.9" # as of CIBW 2.9, this is the default value, pin it so it can't be bumped silently - CIBW_ARCHS_WINDOWS: auto64 - CIBW_ENVIRONMENT: "LDFLAGS='-static-libstdc++'" - CIBW_BUILD_VERBOSITY: 1 - uses: actions/upload-artifact@v4 with: @@ -57,9 +49,22 @@ jobs: - name: Checkout repo uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.9' + - name: Build sdist run: pipx run build --sdist + - name: Test sdist + run: | + python -m pip install "$(echo dist/*.tar.gz)[test]" + python -m pip list + project_dir=$(pwd) + cd ../../ + pytest -c $project_dir/pyproject.toml --rootdir . 
--color=yes --pyargs yt + - name: Upload sdist uses: actions/upload-artifact@v4 with: @@ -120,7 +125,7 @@ jobs: merge-multiple: true - name: Publish to PyPI - uses: pypa/gh-action-pypi-publish@v1.8.14 + uses: pypa/gh-action-pypi-publish@v1.10.2 with: user: __token__ password: ${{ secrets.pypi_token }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d507680f64..1b39e022ad 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -18,7 +18,7 @@ ci: repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v4.6.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer @@ -30,19 +30,22 @@ repos: # TODO: replace this with ruff when it supports embedded python blocks # see https://github.com/astral-sh/ruff/issues/8237 - repo: https://github.com/adamchainz/blacken-docs - rev: 1.16.0 + rev: 1.18.0 hooks: - id: blacken-docs additional_dependencies: [black==24.3.0] - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.0 + rev: v0.6.3 hooks: - id: ruff-format - types_or: [ python, pyi, jupyter ] - id: ruff - types_or: [ python, pyi, jupyter ] - args: [--fix, "--show-fixes"] + args: [ + --fix, + --show-fixes, + # the following line can be removed after support for Python 3.9 is dropped + --extend-select=B905, # zip-without-explicit-strict + ] - repo: https://github.com/pre-commit/pygrep-hooks rev: v1.10.0 @@ -50,7 +53,7 @@ repos: - id: rst-backticks - repo: https://github.com/MarcoGorelli/cython-lint - rev: v0.16.0 + rev: v0.16.2 hooks: - id: cython-lint args: [--no-pycodestyle] diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 24fda87f6a..033d76df9f 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -450,7 +450,7 @@ For all types of contributions, it is required that all tests pass, or that all future. (See :ref:`testing`) * At a minimum, a minimal, self-contained example demonstrating the bug should be included in the body of the Pull Request, or as part of an - indepedent issue. + independent issue. When submitting, you will be asked to make sure that your changes meet all of these requirements.
They are pretty easy to meet, and we're also happy to help diff --git a/MANIFEST.in b/MANIFEST.in index 14e2df79d9..442aa58d9e 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -8,9 +8,54 @@ include yt/utilities/mesh_types.yaml exclude yt/utilities/lib/cykdtree/c_kdtree.cpp prune tests prune answer-store -recursive-include yt *.py *.pyx *.pxi *.pxd *.h *.hpp README* *.txt LICENSE* *.cu +recursive-include yt *.py *.pyx *.pxi *.pxd README* *.txt LICENSE* *.cu recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.html recursive-include doc *.h *.c *.sh *.svgz *.pdf *.svg *.pyx + + +# start with excluding all C/C++ files +recursive-exclude yt *.h *.c *.hpp *.cpp + +# then include back every non-generated C/C++ source file +# the list can be generated by the following command +# git ls-files | grep -E '\.(h|c)(pp)?$' +include yt/frontends/artio/artio_headers/artio.c +include yt/frontends/artio/artio_headers/artio.h +include yt/frontends/artio/artio_headers/artio_endian.c +include yt/frontends/artio/artio_headers/artio_endian.h +include yt/frontends/artio/artio_headers/artio_file.c +include yt/frontends/artio/artio_headers/artio_grid.c +include yt/frontends/artio/artio_headers/artio_internal.h +include yt/frontends/artio/artio_headers/artio_mpi.c +include yt/frontends/artio/artio_headers/artio_mpi.h +include yt/frontends/artio/artio_headers/artio_parameter.c +include yt/frontends/artio/artio_headers/artio_particle.c +include yt/frontends/artio/artio_headers/artio_posix.c +include yt/frontends/artio/artio_headers/artio_selector.c +include yt/frontends/artio/artio_headers/artio_sfc.c +include yt/frontends/artio/artio_headers/cosmology.c +include yt/frontends/artio/artio_headers/cosmology.h +include yt/geometry/vectorized_ops.h +include yt/utilities/lib/_octree_raytracing.hpp +include yt/utilities/lib/cykdtree/c_kdtree.cpp +include yt/utilities/lib/cykdtree/c_kdtree.hpp +include yt/utilities/lib/cykdtree/c_utils.cpp +include yt/utilities/lib/cykdtree/c_utils.hpp +include yt/utilities/lib/cykdtree/windows/stdint.h +include yt/utilities/lib/endian_swap.h +include yt/utilities/lib/fixed_interpolator.cpp +include yt/utilities/lib/fixed_interpolator.hpp +include yt/utilities/lib/marching_cubes.h +include yt/utilities/lib/mesh_triangulation.h +include yt/utilities/lib/origami_tags.c +include yt/utilities/lib/origami_tags.h +include yt/utilities/lib/pixelization_constants.cpp +include yt/utilities/lib/pixelization_constants.hpp +include yt/utilities/lib/platform_dep.h +include yt/utilities/lib/platform_dep_math.hpp +include yt/utilities/lib/tsearch.c +include yt/utilities/lib/tsearch.h + include doc/README doc/activate doc/activate.csh doc/cheatsheet.tex exclude doc/cheatsheet.pdf include doc/extensions/README doc/Makefile @@ -25,5 +70,6 @@ include yt/default.mplstyle prune yt/frontends/_skeleton recursive-include yt/frontends/amrvac *.par +recursive-exclude requirements *.txt exclude .codecov.yml .coveragerc .git-blame-ignore-revs .gitmodules .hgchurn .mailmap exclude .pre-commit-config.yaml clean.sh nose_answer.cfg nose_unit.cfg nose_ignores.txt diff --git a/conftest.py b/conftest.py index 7c1ecb0959..e845e4e016 100644 --- a/conftest.py +++ b/conftest.py @@ -85,7 +85,6 @@ def pytest_configure(config): Reads in the tests/tests.yaml file. This file contains a list of each answer test's answer file (including the changeset number). 
""" - ytcfg["yt", "internals", "within_pytest"] = True # Register custom marks for answer tests and big data config.addinivalue_line("markers", "answer_test: Run the answer tests.") config.addinivalue_line( @@ -161,19 +160,6 @@ def pytest_configure(config): ":DeprecationWarning", ) - if find_spec("astropy") is not None: - # at the time of writing, astropy's wheels are behind numpy's latest - # version but this doesn't cause actual problems in our test suite - # last updated with astropy 5.0 + numpy 1.22 + pytest 6.2.5 - config.addinivalue_line( - "filterwarnings", - ( - "ignore:numpy.ndarray size changed, may indicate binary incompatibility. Expected " - r"(80 from C header, got 88|88 from C header, got 96|80 from C header, got 96)" - " from PyObject:RuntimeWarning" - ), - ) - if PANDAS_VERSION is not None and PANDAS_VERSION >= Version("2.2.0"): config.addinivalue_line( "filterwarnings", diff --git a/doc/cheatsheet.tex b/doc/cheatsheet.tex index 911daefe66..fd2355c1ac 100644 --- a/doc/cheatsheet.tex +++ b/doc/cheatsheet.tex @@ -319,7 +319,7 @@ \subsection{Git} \texttt{git status} \textemdash\ Show status of working tree.\\ \texttt{git diff} \textemdash\ Show changed files in the working tree. \\ \texttt{git log} \textemdash\ Show a log of changes in reverse chronological -oder.\\ +order.\\ \texttt{git revert } \textemdash\ Revert the changes in an existing commit and create a new commit with reverted changes. \\ \texttt{git add } \textemdash\ Stage changes in the working tree to diff --git a/doc/source/analyzing/fields.rst b/doc/source/analyzing/fields.rst index 73a872e83a..50529db95b 100644 --- a/doc/source/analyzing/fields.rst +++ b/doc/source/analyzing/fields.rst @@ -368,7 +368,7 @@ second sets the corresponding ``value``. Currently available format properties a .. _efields: -Energy and Momemtum Fields +Energy and Momentum Fields -------------------------- Fields in yt representing energy and momentum quantities follow a specific diff --git a/doc/source/analyzing/generating_processed_data.rst b/doc/source/analyzing/generating_processed_data.rst index 84abf87eb7..5f748d9e25 100644 --- a/doc/source/analyzing/generating_processed_data.rst +++ b/doc/source/analyzing/generating_processed_data.rst @@ -54,7 +54,8 @@ the transformation of a variable mesh of points consisting of positions and sizes into a fixed-size array that appears like an image. This process is that of pixelization, which yt handles transparently internally. You can access this functionality by constructing a -:class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer` and supplying +:class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer` +and supplying to it your :class:`~yt.data_objects.data_containers.YTSelectionContainer2D` object, as well as some information about how you want the final image to look. You can specify both the bounds of the image (in the appropriate x-y plane) and @@ -62,8 +63,8 @@ the resolution of the output image. You can then have yt pixelize any field you like. .. note:: In previous versions of yt, there was a special class of - FixedResolutionBuffer for off-axis slices. This is no longer - necessary. + FixedResolutionBuffer for off-axis slices. This is still used + for off-axis SPH data projections: OffAxisFixedResolutionBuffer. To create :class:`~yt.data_objects.data_containers.YTSelectionContainer2D` objects, you can access them as described in :ref:`data-objects`, specifically the section @@ -99,7 +100,10 @@ this, see :ref:`saving-grid-data-containers`. 
In the FITS case, there is an option for setting the ``units`` of the coordinate system in the file. If you want to overwrite a file with the same name, set ``clobber=True``. -The :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer` can even be exported +The :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer` +(and its +:class:`~yt.visualization.fixed_resolution.OffAxisProjectionFixedResolutionBuffer` +subclass) can even be exported as a 2D dataset itself, which may be operated on in the same way as any other dataset in yt: .. code-block:: python diff --git a/doc/source/developing/building_the_docs.rst b/doc/source/developing/building_the_docs.rst index e1b5629802..ef8d08b8c6 100644 --- a/doc/source/developing/building_the_docs.rst +++ b/doc/source/developing/building_the_docs.rst @@ -90,7 +90,7 @@ the top level of a local copy, run .. code-block:: bash - $ python -m pip install -e ".[doc]" + $ python -m pip install -e . -r requirements/docs.txt Quick versus Full Documentation Builds ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/source/developing/creating_frontend.rst b/doc/source/developing/creating_frontend.rst index 2678d3abec..c11d1feba0 100644 --- a/doc/source/developing/creating_frontend.rst +++ b/doc/source/developing/creating_frontend.rst @@ -56,7 +56,7 @@ called ``_is_valid()`` that lets the ``yt.load`` method help identify an input file as belonging to *this* particular ``Dataset`` subclass (see :ref:`data-format-detection`). For the most part, the examples of -``yt.frontends.boxlib.data_structures.OrionDataset`` and +``yt.frontends.amrex.data_structures.OrionDataset`` and ``yt.frontends.enzo.data_structures.EnzoDataset`` should be followed, but ``yt.frontends.chombo.data_structures.ChomboDataset``, as a slightly newer addition, can also be used as an instructive example. @@ -64,7 +64,8 @@ slightly newer addition, can also be used as an instructive example. A new set of fields must be added in the file ``fields.py`` in your new directory. For the most part this means subclassing ``FieldInfoContainer`` and adding the necessary fields specific to -your code. Here is a snippet from the base BoxLib field container: +your code. Here is a snippet from the base BoxLib field container (defined in +``yt.frontends.amrex.fields``): .. code-block:: python @@ -273,7 +274,7 @@ that is needed: Even one of the more complex grid objects, -``yt.frontends.boxlib.BoxlibGrid``, is still relatively simple. +``yt.frontends.amrex.BoxlibGrid``, is still relatively simple. Data Reading Functions ---------------------- @@ -310,7 +311,7 @@ At a minimum, one should also override the following methods If your dataset has particle information, you'll want to override the ``_read_particle_coords()`` and ``read_particle_fields()`` methods as well. Each code is going to read data from disk in a different -fashion, but the ``yt.frontends.boxlib.io.IOHandlerBoxlib`` is a +fashion, but the ``yt.frontends.amrex.io.IOHandlerBoxlib`` is a decent place to start. And that just about covers it. Please feel free to email diff --git a/doc/source/developing/debugdrive.rst b/doc/source/developing/debugdrive.rst index 3ed352a2fa..e8fa885437 100644 --- a/doc/source/developing/debugdrive.rst +++ b/doc/source/developing/debugdrive.rst @@ -66,7 +66,7 @@ will induce the requested behavior. SIGUSR1 This will cause the python code to print a stack trace, showing exactly where in the function stack it is currently executing. 
- SIGUSR1 + SIGUSR2 This will cause the python code to insert an IPython session wherever it currently is, with all local variables in the local namespace. It should allow you to change the state variables. diff --git a/doc/source/developing/testing.rst b/doc/source/developing/testing.rst index 1bea29f1b4..eab8270fe5 100644 --- a/doc/source/developing/testing.rst +++ b/doc/source/developing/testing.rst @@ -152,6 +152,15 @@ More pytest options can be found by using the ``--help`` flag Answer Testing -------------- +.. note:: + This section documents answer tests run with ``pytest``. The plan is to + switch to using ``pytest`` for answer tests at some point in the future, + but currently (July 2024), answer tests are still implemented and run with + ``nose``. We generally encourage developers to use ``pytest`` for any new + tests, but if you need to change or update one of the older ``nose`` + tests, or are, e.g., writing a new frontend, + an `older version of this documentation `_ + describes how the ``nose`` tests work. What Do Answer Tests Do ^^^^^^^^^^^^^^^^^^^^^^^ @@ -432,24 +441,26 @@ Handling yt Dependencies ------------------------ Our dependencies are specified in ``pyproject.toml``. Hard dependencies are found in -``options.install_requires``, while optional dependencies are specified in -``options.extras_require``. The ``full`` target contains the specs to run our +``project.dependencies``, while optional dependencies are specified in +``project.optional-dependencies``. The ``full`` target contains the specs to run our test suite, which are intended to be as modern as possible (we don't set upper -limits to versions unless we need to). The ``minimal`` target is used to check -that we don't break backward compatibility with old versions of upstream -projects by accident. It is intended to pin strictly our minimal supported -versions. The ``test`` target specifies the tools neeed to run the tests, but +limits to versions unless we need to). + +The ``test`` target specifies the tools needed to run the tests, but not needed by yt itself. +Documentation and typechecking requirements are found in ``requirements/``, +and used in ``tests/ci_install.sh``. + **Python version support.** -When a new Python version is released, it takes about -a month or two for yt to support it, since we're dependent on bigger projects -like numpy and matplotlib. We vow to follow numpy's deprecation plan regarding -our supported versions for Python and numpy, defined formally in `NEP 29 -`_. However, we try to -avoid bumping our minimal requirements shortly before a yt release. +We vow to follow numpy's deprecation plan regarding our supported versions for Python +and numpy, defined formally in +`NEP 29 `_, but generally +support larger version intervals than recommended in this document. **Third party dependencies.** +We attempt to make yt compatible with a wide variety of upstream software +versions. However, sometimes a specific version of a project that yt depends on causes some breakage and must be blacklisted in the tests or a more experimental project that yt depends on optionally might change sufficiently @@ -457,29 +468,20 @@ that the yt community decides not to support an old version of that project. **Note.** Some of our optional dependencies are not trivial to install and their support may vary across platforms. To manage such issue, we currently use requirement -files in additions to ``pyproject.toml``. They are found in -``tests/*requirements.txt`` and used in ``tests/ci_install.sh``.
- -We attempt to make yt compatible -versions. However, sometimes a specific version of a project that yt depends on -causes some breakage and must be blacklisted in the tests or a more -experimental project that yt depends on optionally might change sufficiently -that the yt community decides not to support an old version of that project. - -To handle cases like this, the versions of upstream software projects installed -on the machines running the yt test suite are pinned to specific version -numbers that must be updated manually. This prevents breaking the yt tests when -a new version of an upstream dependency is released and allows us to manage -updates in upstream projects at our pace. +may vary across platforms. If you would like to add a new dependency for yt (even an optional dependency) or would like to update a version of a yt dependency, you must edit the -``tests/test_requirements.txt`` file, this path is relative to the root of the -repository. This file contains an enumerated list of direct dependencies and -pinned version numbers. For new dependencies, simply append the name of the new +``pyproject.toml`` file. For new dependencies, simply append the name of the new dependency to the end of the file, along with a pin to the latest version number of the package. To update a package's version, simply update the version number in the entry for that package. -Finally, we also run a set of tests with "minimal" dependencies installed. When adding tests that depend on an optional dependency, you can wrap the test with the ``yt.testing.requires_module decorator`` to ensure it does not run during the minimal dependency tests (see yt/frontends/amrvac/tests/test_read_amrvac_namelist.py for a good example). If for some reason you need to update the listing of packages that are installed for the "minimal" dependency tests, you will need to edit ``tests/test_minimal_requirements.txt``. +Finally, we also run a set of tests with "minimal" dependencies installed. +When adding tests that depend on an optional dependency, you can wrap the test +with the ``yt.testing.requires_module`` decorator to ensure it does not run +during the minimal dependency tests (see +``yt/frontends/amrvac/tests/test_read_amrvac_namelist.py`` for a good example). +If for some reason you need to update the listing of packages that are installed +for the "minimal" dependency tests, you will need to update +``requirements/minimal_env.txt``. diff --git a/doc/source/examining/loading_data.rst b/doc/source/examining/loading_data.rst index 605614b2de..0415712367 100644 --- a/doc/source/examining/loading_data.rst +++ b/doc/source/examining/loading_data.rst @@ -558,8 +558,8 @@ using a ``parameters`` dict, accepting the following keys: AMReX / BoxLib Data ------------------- -AMReX and BoxLib share a frontend (currently named ``boxlib``), since -the file format nearly identical. yt has been tested with AMReX/BoxLib +AMReX and BoxLib share a frontend, since +the file format is nearly identical. yt has been tested with AMReX/BoxLib data generated by Orion, Nyx, Maestro, Castro, IAMR, and WarpX. Currently it is cared for by a combination of Andrew Myers, Matthew Turk, and Mike Zingale.
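Note that the ``boxlib`` → ``amrex`` frontend rename elsewhere in this diff leaves the user-facing loading workflow above unchanged: ``yt.load`` auto-detects plotfiles through each frontend's ``_is_valid()``. A minimal sketch, where the plotfile path ``plt00100`` is a hypothetical example:

.. code-block:: python

    import yt

    # Frontend detection is automatic; there is no need to name the
    # amrex/boxlib frontend explicitly when loading a plotfile.
    ds = yt.load("plt00100")  # hypothetical AMReX/BoxLib plotfile directory
    ad = ds.all_data()
    print(ds.field_list[:5])
    print(ad["gas", "density"].max())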
diff --git a/doc/source/quickstart/4)_Data_Objects_and_Time_Series.ipynb b/doc/source/quickstart/4)_Data_Objects_and_Time_Series.ipynb index 49592f19f1..351719dce7 100644 --- a/doc/source/quickstart/4)_Data_Objects_and_Time_Series.ipynb +++ b/doc/source/quickstart/4)_Data_Objects_and_Time_Series.ipynb @@ -337,6 +337,8 @@ "\n", "There are two different types of covering grids: unsmoothed and smoothed. Smoothed grids will be filled through a cascading interpolation process; they will be filled at level 0, interpolated to level 1, filled at level 1, interpolated to level 2, filled at level 2, etc. This will help to reduce edge effects. Unsmoothed covering grids will not be interpolated, but rather values will be duplicated multiple times.\n", "\n", + "For SPH datasets, the covering grid gives the SPH-interpolated value of a field at each grid cell center. This is done for unsmoothed grids; smoothed grids are not available for SPH data.\n", + "\n", "Here we create an unsmoothed covering grid at level 2, with the left edge at `[0.0, 0.0, 0.0]` and with dimensions equal to those that would cover the entire domain at level 2. We can then ask for the Density field, which will be a 3D array." ] }, @@ -385,13 +387,13 @@ ], "metadata": { "kernelspec": { - "name": "python3", "display_name": "Python 3.9.5 64-bit ('yt-dev': pyenv)", "metadata": { "interpreter": { "hash": "14363bd97bed451d1329fb3e06aa057a9e955a9421c5343dd7530f5497723a41" } - } + }, + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/doc/source/reference/api/api.rst b/doc/source/reference/api/api.rst index 61b63f2c15..3cc192c578 100644 --- a/doc/source/reference/api/api.rst +++ b/doc/source/reference/api/api.rst @@ -217,18 +217,18 @@ AMReX/Boxlib .. autosummary:: - ~yt.frontends.boxlib.data_structures.BoxlibGrid - ~yt.frontends.boxlib.data_structures.BoxlibHierarchy - ~yt.frontends.boxlib.data_structures.BoxlibDataset - ~yt.frontends.boxlib.data_structures.CastroDataset - ~yt.frontends.boxlib.data_structures.MaestroDataset - ~yt.frontends.boxlib.data_structures.NyxHierarchy - ~yt.frontends.boxlib.data_structures.NyxDataset - ~yt.frontends.boxlib.data_structures.OrionHierarchy - ~yt.frontends.boxlib.data_structures.OrionDataset - ~yt.frontends.boxlib.fields.BoxlibFieldInfo - ~yt.frontends.boxlib.io.IOHandlerBoxlib - ~yt.frontends.boxlib.io.IOHandlerOrion + ~yt.frontends.amrex.data_structures.BoxlibGrid + ~yt.frontends.amrex.data_structures.BoxlibHierarchy + ~yt.frontends.amrex.data_structures.BoxlibDataset + ~yt.frontends.amrex.data_structures.CastroDataset + ~yt.frontends.amrex.data_structures.MaestroDataset + ~yt.frontends.amrex.data_structures.NyxHierarchy + ~yt.frontends.amrex.data_structures.NyxDataset + ~yt.frontends.amrex.data_structures.OrionHierarchy + ~yt.frontends.amrex.data_structures.OrionDataset + ~yt.frontends.amrex.fields.BoxlibFieldInfo + ~yt.frontends.amrex.io.IOHandlerBoxlib + ~yt.frontends.amrex.io.IOHandlerOrion CfRadial ^^^^^^^^ diff --git a/doc/source/visualizing/plots.rst b/doc/source/visualizing/plots.rst index ec7b6109a5..72e5aac758 100644 --- a/doc/source/visualizing/plots.rst +++ b/doc/source/visualizing/plots.rst @@ -293,8 +293,8 @@ argument. Optionally, a ``north_vector`` can be specified to fix the orientation of the image plane. .. note:: Not every data types have support for off-axis slices yet. - Currently, this operation is supported for grid based data with cartesian geometry. - In some cases (like SPH data) an off-axis projection over a thin region might be used instead. 
+ Currently, this operation is supported for grid based and SPH data with cartesian geometry. + In some cases an off-axis projection over a thin region might be used instead. .. _projection-plots: @@ -433,6 +433,8 @@ by applying the In this use case, the volume renderer casts a set of plane parallel rays, one for each pixel in the image. The data values along each ray are summed, creating the final image buffer. +For SPH datasets, the coordinates are instead simply rotated before the axis-aligned +projection function is applied. .. _off-axis-projection-function: @@ -652,10 +654,6 @@ simply pass ``all`` as the first argument of the field tuple: Additional Notes for Plotting Particle Data ------------------------------------------- -An important caveat when visualizing particle data is that off-axis slice plotting is -not available for any particle data. However, axis-aligned slice plots (as described in -:ref:`slice-plots`) will work. - Since version 4.2.0, off-axis projections are supported for non-SPH particle data. Previous to that, this operation was only supported for SPH particles. Two historical workaround methods were available for plotting non-SPH particles with off-axis diff --git a/nose_ignores.txt b/nose_ignores.txt index 09ad4a1259..b5529a6ecc 100644 --- a/nose_ignores.txt +++ b/nose_ignores.txt @@ -43,3 +43,7 @@ --ignore-file=test_alt_ray_tracers\.py --ignore-file=test_minimal_representation\.py --ignore-file=test_set_log_level\.py +--ignore-file=test_field_parsing\.py +--ignore-file=test_disks\.py +--ignore-file=test_offaxisprojection_pytestonly\.py +--ignore-file=test_sph_pixelization_pytestonly\.py diff --git a/pyproject.toml b/pyproject.toml index 168c11aed2..08d31233d6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,10 +6,10 @@ requires = [ # for the upper pin in Cython # see https://github.com/yt-project/yt/issues/4044 "Cython>=3.0.3, <3.1", - "numpy>=2.0.0rc1", + "numpy>=2.0.0", "ewah-bool-utils>=1.2.0", ] -build-backend = "setuptools.build_meta:__legacy__" +build-backend = "setuptools.build_meta" [project] name = "yt" @@ -34,6 +34,7 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "Topic :: Scientific/Engineering :: Astronomy", "Topic :: Scientific/Engineering :: Physics", "Topic :: Scientific/Engineering :: Visualization", @@ -42,13 +43,16 @@ keywords = [ "astronomy astrophysics visualization amr adaptivemeshrefinement", ] requires-python = ">=3.9.2" + +# keep in sync with requirements/minimal_env.txt dependencies = [ "cmyt>=1.1.2", "ewah-bool-utils>=1.2.0", - "ipywidgets>=8.0.0", "matplotlib>=3.5", "more-itertools>=8.4", "numpy>=1.19.3, <3", # keep minimal requirement in sync with NPY_TARGET_VERSION + # https://github.com/numpy/numpy/issues/27037 + "numpy!=2.0.1 ; platform_machine=='arm64' and platform_system=='Darwin'", "packaging>=20.9", "pillow>=8.0.0", "tomli-w>=0.4.0", @@ -77,7 +81,7 @@ answer-testing = "yt.utilities.answer_testing.framework:AnswerTesting" [project.optional-dependencies] # some generic, reusable constraints on optional-deps -HDF5 = ["h5py>=3.1.0"] +HDF5 = ["h5py>=3.1.0,!=3.12.0; platform_system=='Windows'"] # see https://github.com/h5py/h5py/issues/2505 netCDF4 = ["netCDF4!=1.6.1,>=1.5.3"] # see https://github.com/Unidata/netcdf4-python/issues/1192 Fortran = ["f90nml>=1.1"] @@ -89,6 +93,7 @@ Fortran = ["f90nml>=1.1"] # We also normalize all target names to lower case for consistency.
adaptahop = [] ahf = [] +amrex = [] amrvac = ["yt[Fortran]"] art = [] arepo = ["yt[HDF5]"] @@ -133,7 +138,8 @@ full = [ "cartopy>=0.22.0", "firefly>=3.2.0", "glueviz>=0.13.3", - "ipython>=2.0.0", + "ipython>=7.16.2", + "ipywidgets>=8.0.0", "miniballcpp>=0.2.1", "mpi4py>=3.0.3", "pandas>=1.1.2", @@ -146,6 +152,7 @@ full = [ "ratarmount~=0.8.1;platform_system!='Windows' and platform_system!='Darwin'", "yt[adaptahop]", "yt[ahf]", + "yt[amrex]", "yt[amrvac]", "yt[art]", "yt[arepo]", @@ -186,37 +193,9 @@ full = [ ] # dev-only extra targets -doc = [ - "alabaster>=0.7.13", - "bottle>=0.12.25", - "jinja2<3.1.0", # see https://github.com/readthedocs/readthedocs.org/issues/9037 - "jupyter-client>=8.3.1", - "nbsphinx>=0.9.3", - "nose~=1.3.7; python_version < '3.10'", - "pytest>=6.1", - "pyx>=0.15", - "sphinx>=7.2.5", - "sphinx-bootstrap-theme>=0.8.1", - "sphinx-rtd-theme>=1.3.0", -] mapserver = [ "bottle", ] -minimal = [ - "cmyt==1.1.2", - "ewah-bool-utils==1.2.0", - "ipywidgets==8.0.0", - "matplotlib==3.5", - "more-itertools==8.4", - "numpy==1.19.3", - "packaging==20.9", - "pillow==8.0.0", - "tomli-w==0.4.0", - "tqdm==3.4.0", - "unyt==2.9.2", - "tomli==1.2.3;python_version < '3.11'", - "typing-extensions==4.4.0;python_version < '3.12'", -] test = [ "pyaml>=17.10.0", "pytest>=6.1", @@ -225,13 +204,7 @@ test = [ "nose~=1.3.7; python_version < '3.10'", "nose-exclude; python_version < '3.10'", "nose-timer~=1.0.0; python_version < '3.10'", -] -typecheck = [ - "mypy==1.5.1", - "types-PyYAML==6.0.12.2", - "types-chardet==5.0.4", - "types-requests==2.28.11.5", - "typing-extensions==4.4.0; python_version < '3.12'", + "imageio!=2.35.0", # see https://github.com/yt-project/yt/issues/4966 ] [project.scripts] @@ -266,7 +239,6 @@ exclude = ''' ''' [tool.ruff] -extend-include = ["*.ipynb"] exclude = [ "doc", "benchmarks", @@ -290,15 +262,18 @@ select = [ "C4", # flake8-comprehensions "B", # flake8-bugbear "G", # flake8-logging-format + "TCH", # flake8-type-checking "YTT", # flake8-2020 "UP", # pyupgrade "I", # isort "NPY", # numpy specific rules + "RUF031"# incorrectly-parenthesized-tuple-in-subscript ] ignore = [ "E501", # line too long "E741", # Do not use variables named 'I', 'O', or 'l' "B018", # Found useless expression. 
# disabled because ds.index is idiomatic + "UP038", # non-pep604-isinstance ] [tool.ruff.lint.per-file-ignores] @@ -333,87 +308,87 @@ addopts = ''' -s -v -rsfE - --ignore-glob='*_nose.py' - --ignore='yt/data_objects/level_sets/tests/test_clump_finding.py' - --ignore='yt/data_objects/tests/test_connected_sets.py' - --ignore='yt/data_objects/tests/test_data_containers.py' - --ignore='yt/data_objects/tests/test_dataset_access.py' - --ignore='yt/data_objects/tests/test_disks.py' - --ignore='yt/data_objects/tests/test_particle_filter.py' - --ignore='yt/data_objects/tests/test_particle_trajectories.py' - --ignore='yt/data_objects/tests/test_pickling.py' - --ignore='yt/data_objects/tests/test_regions.py' - --ignore='yt/fields/tests/test_particle_fields.py' - --ignore='yt/fields/tests/test_vector_fields.py' - --ignore='yt/fields/tests/test_xray_fields.py' - --ignore='yt/frontends/adaptahop/tests/test_outputs.py' - --ignore='yt/frontends/ahf/tests/test_outputs.py' - --ignore='yt/frontends/amrvac/tests/test_outputs.py' - --ignore='yt/frontends/amrvac/tests/test_units_override.py' - --ignore='yt/frontends/arepo/tests/test_outputs.py' - --ignore='yt/frontends/art/tests/test_outputs.py' - --ignore='yt/frontends/artio/tests/test_outputs.py' - --ignore='yt/frontends/athena/tests/test_outputs.py' - --ignore='yt/frontends/athena_pp/tests/test_outputs.py' - --ignore='yt/frontends/boxlib/tests/test_outputs.py' - --ignore='yt/frontends/cf_radial/tests/test_outputs.py' - --ignore='yt/frontends/chimera/tests/test_outputs.py' - --ignore='yt/frontends/cholla/tests/test_outputs.py' - --ignore='yt/frontends/chombo/tests/test_outputs.py' - --ignore='yt/frontends/eagle/tests/test_outputs.py' - --ignore='yt/frontends/enzo/tests/test_outputs.py' - --ignore='yt/frontends/enzo_e/tests/test_outputs.py' - --ignore='yt/frontends/exodus_ii/tests/test_outputs.py' - --ignore='yt/frontends/fits/tests/test_outputs.py' - --ignore='yt/frontends/flash/tests/test_outputs.py' - --ignore='yt/frontends/gadget/tests/test_outputs.py' - --ignore='yt/frontends/gadget_fof/tests/test_outputs.py' - --ignore='yt/frontends/gamer/tests/test_outputs.py' - --ignore='yt/frontends/gdf/tests/test_outputs.py' - --ignore='yt/frontends/gdf/tests/test_outputs_nose.py' - --ignore='yt/frontends/gizmo/tests/test_outputs.py' - --ignore='yt/frontends/halo_catalog/tests/test_outputs.py' - --ignore='yt/frontends/moab/tests/test_c5.py' - --ignore='yt/frontends/nc4_cm1/tests/test_outputs.py' - --ignore='yt/frontends/open_pmd/tests/test_outputs.py' - --ignore='yt/frontends/owls/tests/test_outputs.py' - --ignore='yt/frontends/owls_subfind/tests/test_outputs.py' - --ignore='yt/frontends/ramses/tests/test_outputs.py' - --ignore='yt/frontends/rockstar/tests/test_outputs.py' - --ignore='yt/frontends/tipsy/tests/test_outputs.py' - --ignore='yt/frontends/ytdata/tests/test_old_outputs.py' - --ignore='yt/frontends/ytdata/tests/test_outputs.py' - --ignore='yt/frontends/ytdata/tests/test_unit.py' - --ignore='yt/geometry/coordinates/tests/test_axial_pixelization.py' - --ignore='yt/geometry/coordinates/tests/test_cylindrical_coordinates.py' - --ignore='yt/geometry/coordinates/tests/test_spherical_coordinates.py' - --ignore='yt/tests/test_funcs.py' - --ignore='yt/utilities/lib/cykdtree/tests/__init__.py' - --ignore='yt/utilities/lib/cykdtree/tests/test_kdtree.py' - --ignore='yt/utilities/lib/cykdtree/tests/test_plot.py' - --ignore='yt/utilities/lib/cykdtree/tests/test_utils.py' - --ignore='yt/utilities/tests/test_cosmology.py' - 
--ignore='yt/visualization/tests/test_callbacks.py' - --ignore='yt/visualization/tests/test_color_maps.py' - --ignore='yt/visualization/tests/test_geo_projections.py' - --ignore='yt/visualization/tests/test_image_writer.py' - --ignore='yt/visualization/tests/test_line_plots.py' - --ignore='yt/visualization/tests/test_mesh_slices.py' - --ignore='yt/visualization/tests/test_norm_api_custom_norm.py' - --ignore='yt/visualization/tests/test_norm_api_inf_zlim.py' - --ignore='yt/visualization/tests/test_norm_api_lineplot.py' - --ignore='yt/visualization/tests/test_norm_api_particleplot.py' - --ignore='yt/visualization/tests/test_norm_api_phaseplot_set_colorbar_explicit.py' - --ignore='yt/visualization/tests/test_norm_api_phaseplot_set_colorbar_implicit.py' - --ignore='yt/visualization/tests/test_norm_api_profileplot.py' - --ignore='yt/visualization/tests/test_norm_api_set_background_color.py' - --ignore='yt/visualization/tests/test_particle_plot.py' - --ignore='yt/visualization/tests/test_plot_modifications.py' - --ignore='yt/visualization/tests/test_plotwindow.py' - --ignore='yt/visualization/tests/test_profile_plots.py' - --ignore='yt/visualization/tests/test_raw_field_slices.py' - --ignore='yt/visualization/volume_rendering/tests/test_mesh_render.py' - --ignore='yt/visualization/volume_rendering/tests/test_vr_orientation.py' + --ignore-glob='/*_nose.py' + --ignore-glob='/*/yt/data_objects/level_sets/tests/test_clump_finding.py' + --ignore-glob='/*/yt/data_objects/tests/test_connected_sets.py' + --ignore-glob='/*/yt/data_objects/tests/test_data_containers.py' + --ignore-glob='/*/yt/data_objects/tests/test_dataset_access.py' + --ignore-glob='/*/yt/data_objects/tests/test_particle_filter.py' + --ignore-glob='/*/yt/data_objects/tests/test_particle_trajectories.py' + --ignore-glob='/*/yt/data_objects/tests/test_pickling.py' + --ignore-glob='/*/yt/data_objects/tests/test_regions.py' + --ignore-glob='/*/yt/fields/tests/test_particle_fields.py' + --ignore-glob='/*/yt/fields/tests/test_vector_fields.py' + --ignore-glob='/*/yt/fields/tests/test_xray_fields.py' + --ignore-glob='/*/yt/frontends/adaptahop/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/ahf/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/amrex/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/amrvac/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/amrvac/tests/test_units_override.py' + --ignore-glob='/*/yt/frontends/arepo/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/art/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/artio/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/athena/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/athena_pp/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/boxlib/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/cf_radial/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/chimera/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/cholla/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/chombo/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/eagle/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/enzo/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/enzo_e/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/exodus_ii/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/fits/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/flash/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/gadget/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/gadget_fof/tests/test_outputs.py' + 
--ignore-glob='/*/yt/frontends/gamer/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/gdf/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/gdf/tests/test_outputs_nose.py' + --ignore-glob='/*/yt/frontends/gizmo/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/halo_catalog/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/moab/tests/test_c5.py' + --ignore-glob='/*/yt/frontends/nc4_cm1/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/open_pmd/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/owls/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/owls_subfind/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/ramses/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/rockstar/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/tipsy/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/ytdata/tests/test_old_outputs.py' + --ignore-glob='/*/yt/frontends/ytdata/tests/test_outputs.py' + --ignore-glob='/*/yt/frontends/ytdata/tests/test_unit.py' + --ignore-glob='/*/yt/geometry/coordinates/tests/test_axial_pixelization.py' + --ignore-glob='/*/yt/geometry/coordinates/tests/test_cylindrical_coordinates.py' + --ignore-glob='/*/yt/geometry/coordinates/tests/test_spherical_coordinates.py' + --ignore-glob='/*/yt/tests/test_funcs.py' + --ignore-glob='/*/yt/utilities/lib/cykdtree/tests/__init__.py' + --ignore-glob='/*/yt/utilities/lib/cykdtree/tests/test_kdtree.py' + --ignore-glob='/*/yt/utilities/lib/cykdtree/tests/test_plot.py' + --ignore-glob='/*/yt/utilities/lib/cykdtree/tests/test_utils.py' + --ignore-glob='/*/yt/utilities/tests/test_cosmology.py' + --ignore-glob='/*/yt/visualization/tests/test_callbacks.py' + --ignore-glob='/*/yt/visualization/tests/test_color_maps.py' + --ignore-glob='/*/yt/visualization/tests/test_geo_projections.py' + --ignore-glob='/*/yt/visualization/tests/test_image_writer.py' + --ignore-glob='/*/yt/visualization/tests/test_line_plots.py' + --ignore-glob='/*/yt/visualization/tests/test_mesh_slices.py' + --ignore-glob='/*/yt/visualization/tests/test_norm_api_custom_norm.py' + --ignore-glob='/*/yt/visualization/tests/test_norm_api_inf_zlim.py' + --ignore-glob='/*/yt/visualization/tests/test_norm_api_lineplot.py' + --ignore-glob='/*/yt/visualization/tests/test_norm_api_particleplot.py' + --ignore-glob='/*/yt/visualization/tests/test_norm_api_phaseplot_set_colorbar_explicit.py' + --ignore-glob='/*/yt/visualization/tests/test_norm_api_phaseplot_set_colorbar_implicit.py' + --ignore-glob='/*/yt/visualization/tests/test_norm_api_profileplot.py' + --ignore-glob='/*/yt/visualization/tests/test_norm_api_set_background_color.py' + --ignore-glob='/*/yt/visualization/tests/test_particle_plot.py' + --ignore-glob='/*/yt/visualization/tests/test_plot_modifications.py' + --ignore-glob='/*/yt/visualization/tests/test_plotwindow.py' + --ignore-glob='/*/yt/visualization/tests/test_profile_plots.py' + --ignore-glob='/*/yt/visualization/tests/test_raw_field_slices.py' + --ignore-glob='/*/yt/visualization/volume_rendering/tests/test_mesh_render.py' + --ignore-glob='/*/yt/visualization/volume_rendering/tests/test_vr_orientation.py' ''' @@ -489,3 +464,21 @@ warn_unused_ignores = true warn_unreachable = true show_error_context = true exclude = "(test_*|lodgeit)" + +[tool.cibuildwheel] +build = "cp39-* cp310-* cp311-* cp312-* cp313-*" +build-verbosity = 1 +test-skip = "*-musllinux*" +test-extras = "test" +test-command = [ + "pytest -c {project}/pyproject.toml --rootdir . 
--color=yes --pyargs yt -ra", +] + +[tool.cibuildwheel.linux] +archs = "x86_64" + +[tool.cibuildwheel.macos] +archs = "auto" + +[tool.cibuildwheel.windows] +archs = "auto64" diff --git a/requirements/docs.txt b/requirements/docs.txt new file mode 100644 index 0000000000..e577a5b968 --- /dev/null +++ b/requirements/docs.txt @@ -0,0 +1,12 @@ +alabaster>=0.7.13 +bottle>=0.12.25 +ipykernel>=6.29.4 +jinja2<3.1.0 # see https://github.com/readthedocs/readthedocs.org/issues/9037 +jupyter-client>=8.3.1 +nbsphinx>=0.9.3 +nose~=1.3.7; python_version < '3.10' +pytest>=6.1 +pyx>=0.15 +sphinx>=7.2.5 +sphinx-bootstrap-theme>=0.8.1 +sphinx-rtd-theme>=1.3.0 diff --git a/requirements/minimal_env.txt b/requirements/minimal_env.txt new file mode 100644 index 0000000000..086d0076d2 --- /dev/null +++ b/requirements/minimal_env.txt @@ -0,0 +1,69 @@ +# This file was autogenerated by uv via the following command: +# uv pip compile pyproject.toml --python=3.9 --python-platform=x86_64-unknown-linux-gnu --resolution=lowest-direct --no-build +cmyt==1.1.2 + # via yt (pyproject.toml) +colorspacious==1.1.2 + # via cmyt +cycler==0.12.1 + # via matplotlib +ewah-bool-utils==1.2.0 + # via yt (pyproject.toml) +fonttools==4.53.1 + # via matplotlib +kiwisolver==1.4.7 + # via matplotlib +matplotlib==3.5.0 + # via + # yt (pyproject.toml) + # cmyt +more-itertools==8.4.0 + # via + # yt (pyproject.toml) + # cmyt +mpmath==1.3.0 + # via sympy +numpy==1.19.3 + # via + # yt (pyproject.toml) + # cmyt + # colorspacious + # ewah-bool-utils + # matplotlib + # unyt +packaging==20.9 + # via + # yt (pyproject.toml) + # matplotlib + # setuptools-scm +pillow==8.0.0 + # via + # yt (pyproject.toml) + # matplotlib +pyparsing==3.1.4 + # via + # matplotlib + # packaging +python-dateutil==2.9.0.post0 + # via matplotlib +setuptools==74.1.2 + # via setuptools-scm +setuptools-scm==8.1.0 + # via matplotlib +six==1.16.0 + # via python-dateutil +sympy==1.13.2 + # via unyt +tomli==1.2.3 + # via + # yt (pyproject.toml) + # setuptools-scm +tomli-w==0.4.0 + # via yt (pyproject.toml) +tqdm==3.4.0 + # via yt (pyproject.toml) +typing-extensions==4.4.0 + # via + # yt (pyproject.toml) + # setuptools-scm +unyt==2.9.2 + # via yt (pyproject.toml) diff --git a/requirements/typecheck.txt b/requirements/typecheck.txt new file mode 100644 index 0000000000..f116cfe1d5 --- /dev/null +++ b/requirements/typecheck.txt @@ -0,0 +1,5 @@ +mypy==1.11.2 +types-PyYAML==6.0.12.20240808 +types-chardet==5.0.4.6 +types-requests==2.32.0.20240907 +typing-extensions==4.6.0; python_version < '3.12' diff --git a/setup.py b/setup.py index f1d547b183..f77776e337 100644 --- a/setup.py +++ b/setup.py @@ -1,12 +1,18 @@ import glob import os +import sys from collections import defaultdict from distutils.ccompiler import get_default_compiler from importlib import resources as importlib_resources from setuptools import Distribution, setup +# ensure enclosing directory is in PYTHON_PATH to allow importing from setupext.py +if (script_dir := os.path.dirname(__file__)) not in sys.path: + sys.path.insert(0, script_dir) + from setupext import ( + NUMPY_MACROS, check_CPP14_flags, check_for_openmp, check_for_pyembree, @@ -111,6 +117,7 @@ def has_ext_modules(self): { "sources": ["yt/utilities/lib/fixed_interpolator.cpp"], "include_dirs": clib_include_dirs, + "define_macros": NUMPY_MACROS, }, ) diff --git a/setupext.py b/setupext.py index cf5af62812..a9aafdbbf9 100644 --- a/setupext.py +++ b/setupext.py @@ -394,6 +394,13 @@ def get_python_include_dirs(): return include_dirs +NUMPY_MACROS = [ + 
("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"), + # keep in sync with runtime requirements (pyproject.toml) + ("NPY_TARGET_VERSION", "NPY_1_19_API_VERSION"), +] + + def create_build_ext(lib_exts, cythonize_aliases): class build_ext(_build_ext): # subclass setuptools extension builder to avoid importing cython and numpy @@ -425,11 +432,7 @@ def finalize_options(self): self.include_dirs.append(numpy.get_include()) self.include_dirs.append(ewah_bool_utils.get_include()) - define_macros = [ - ("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"), - # keep in sync with runtime requirements (pyproject.toml) - ("NPY_TARGET_VERSION", "NPY_1_19_API_VERSION"), - ] + define_macros = NUMPY_MACROS if self.define is None: self.define = define_macros diff --git a/tests/ci_install.sh b/tests/ci_install.sh index b73c90f739..a29d9d5334 100644 --- a/tests/ci_install.sh +++ b/tests/ci_install.sh @@ -32,7 +32,7 @@ fi # but the primary intention is to embed this script in CI jobs if [[ ${dependencies} == "minimal" ]]; then # test with minimal versions of runtime dependencies - python -m pip install -e ".[test,minimal]" + python -m pip install -e ".[test]" -r requirements/minimal_env.txt elif [[ ${dependencies} == "cartopy" ]]; then python -m pip install 'cartopy>=0.22' # scipy is an optional dependency to cartopy diff --git a/tests/pytest_mpl_baseline b/tests/pytest_mpl_baseline index 9162b1a3f1..8a30c989a3 160000 --- a/tests/pytest_mpl_baseline +++ b/tests/pytest_mpl_baseline @@ -1 +1 @@ -Subproject commit 9162b1a3f1bdccbe398221fec8ace489e53078d9 +Subproject commit 8a30c989a30b00e681db645cc767cb9047508169 diff --git a/tests/tests.yaml b/tests/tests.yaml index 5ef62862ac..40836b3381 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -3,6 +3,28 @@ answer_tests: local_art_004: # PR 3081, 3101 - yt/frontends/art/tests/test_outputs.py:test_d9p +#copied from boxlib frontend + local_amrex_012: + - yt/frontends/amrex/tests/test_outputs.py:test_radadvect + - yt/frontends/amrex/tests/test_outputs.py:test_radtube + - yt/frontends/amrex/tests/test_outputs.py:test_star + - yt/frontends/amrex/tests/test_outputs.py:test_OrionDataset + - yt/frontends/amrex/tests/test_outputs.py:test_CastroDataset + - yt/frontends/amrex/tests/test_outputs.py:test_RT_particles + - yt/frontends/amrex/tests/test_outputs.py:test_units_override + - yt/frontends/amrex/tests/test_outputs.py:test_raw_fields + + local_amrex_particles_010: + - yt/frontends/amrex/tests/test_outputs.py:test_LyA + - yt/frontends/amrex/tests/test_outputs.py:test_nyx_particle_io + - yt/frontends/amrex/tests/test_outputs.py:test_castro_particle_io + - yt/frontends/amrex/tests/test_outputs.py:test_langmuir + - yt/frontends/amrex/tests/test_outputs.py:test_plasma + - yt/frontends/amrex/tests/test_outputs.py:test_beam + - yt/frontends/amrex/tests/test_outputs.py:test_warpx_particle_io + - yt/frontends/amrex/tests/test_outputs.py:test_NyxDataset + - yt/frontends/amrex/tests/test_outputs.py:test_WarpXDataset + local_amrvac_009: # PR 2945 - yt/frontends/amrvac/tests/test_outputs.py:test_domain_size - yt/frontends/amrvac/tests/test_outputs.py:test_bw_polar_2d @@ -15,7 +37,7 @@ answer_tests: - yt/frontends/amrvac/tests/test_outputs.py:test_riemann_cartesian_175D - yt/frontends/amrvac/tests/test_outputs.py:test_rmi_cartesian_dust_2D - local_arepo_011: # PR 4419 + local_arepo_013: # PR 4939 - yt/frontends/arepo/tests/test_outputs.py:test_arepo_bullet - yt/frontends/arepo/tests/test_outputs.py:test_arepo_tng59 - yt/frontends/arepo/tests/test_outputs.py:test_arepo_cr @@ -47,7 +69,7 @@ 
answer_tests: - yt/frontends/chombo/tests/test_outputs.py:test_zp - yt/frontends/chombo/tests/test_outputs.py:test_kho - local_enzo_009: # PR 3856 + local_enzo_011: # PR 4930 - yt/frontends/enzo/tests/test_outputs.py:test_moving7 - yt/frontends/enzo/tests/test_outputs.py:test_galaxy0030 - yt/frontends/enzo/tests/test_outputs.py:test_toro1d @@ -70,7 +92,7 @@ answer_tests: - yt/frontends/flash/tests/test_outputs.py:test_wind_tunnel - yt/frontends/flash/tests/test_outputs.py:test_fid_1to3_b1 - local_gadget_009: # PR 3258 + local_gadget_010: # PR 4939 - yt/frontends/gadget/tests/test_outputs.py:test_iso_collapse - yt/frontends/gadget/tests/test_outputs.py:test_pid_uniqueness - yt/frontends/gadget/tests/test_outputs.py:test_bigendian_field_access @@ -86,7 +108,7 @@ answer_tests: local_gdf_002: - yt/frontends/gdf/tests/test_outputs_nose.py:test_sedov_tunnel - local_gizmo_008: # PR 2909 + local_gizmo_009: # PR 4939 - yt/frontends/gizmo/tests/test_outputs.py:test_gizmo_64 local_halos_012: # PR 3325 @@ -96,7 +118,7 @@ answer_tests: - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g5 - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g42 - local_owls_008: # PR 2909 + local_owls_009: # PR 4939 - yt/frontends/owls/tests/test_outputs.py:test_snapshot_033 - yt/frontends/owls/tests/test_outputs.py:test_OWLS_particlefilter @@ -108,7 +130,7 @@ answer_tests: - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers - yt/visualization/tests/test_callbacks.py:test_axis_manipulations - local_tipsy_009: # PR 2909 + local_tipsy_010: # PR 4939 - yt/frontends/tipsy/tests/test_outputs.py:test_pkdgrav - yt/frontends/tipsy/tests/test_outputs.py:test_gasoline_dmonly - yt/frontends/tipsy/tests/test_outputs.py:test_tipsy_galaxy @@ -121,23 +143,8 @@ answer_tests: local_boxlib_012: - yt/frontends/boxlib/tests/test_outputs.py:test_radadvect - - yt/frontends/boxlib/tests/test_outputs.py:test_radtube - - yt/frontends/boxlib/tests/test_outputs.py:test_star - - yt/frontends/boxlib/tests/test_outputs.py:test_OrionDataset - - yt/frontends/boxlib/tests/test_outputs.py:test_CastroDataset - - yt/frontends/boxlib/tests/test_outputs.py:test_RT_particles - - yt/frontends/boxlib/tests/test_outputs.py:test_units_override - - yt/frontends/boxlib/tests/test_outputs.py:test_raw_fields local_boxlib_particles_010: - - yt/frontends/boxlib/tests/test_outputs.py:test_LyA - - yt/frontends/boxlib/tests/test_outputs.py:test_nyx_particle_io - - yt/frontends/boxlib/tests/test_outputs.py:test_castro_particle_io - - yt/frontends/boxlib/tests/test_outputs.py:test_langmuir - - yt/frontends/boxlib/tests/test_outputs.py:test_plasma - - yt/frontends/boxlib/tests/test_outputs.py:test_beam - - yt/frontends/boxlib/tests/test_outputs.py:test_warpx_particle_io - - yt/frontends/boxlib/tests/test_outputs.py:test_NyxDataset - yt/frontends/boxlib/tests/test_outputs.py:test_WarpXDataset local_ramses_005: # PR 3856 @@ -180,6 +187,7 @@ other_tests: - "--ignore-file=test_ewah_write_load\\.py" - "--ignore-file=test_external_frontends\\.py" - "--ignore-file=test_field_access_pytest\\.py" + - "--ignore-file=test_field_parsing\\.py" - "--ignore-file=test_file_sanitizer\\.py" - "--ignore-file=test_firefly\\.py" - "--ignore-file=test_geometries\\.py" @@ -213,5 +221,7 @@ other_tests: - "--exclude-test=yt.frontends.gdf.tests.test_outputs.TestGDF" - "--exclude-test=yt.frontends.adaptahop.tests.test_outputs" - "--exclude-test=yt.frontends.stream.tests.test_stream_particles.test_stream_non_cartesian_particles" + - 
"--ignore-file=test_offaxisprojection_pytestonly\\.py" + - "--ignore-file=test_sph_pixelization_pytestonly\\.py" cookbook: - 'doc/source/cookbook/tests/test_cookbook.py' diff --git a/tests/unpin_requirements.py b/tests/unpin_requirements.py index 0da23061dc..932c11192e 100644 --- a/tests/unpin_requirements.py +++ b/tests/unpin_requirements.py @@ -1,3 +1,10 @@ +# /// script +# requires-python = ">=3.10" +# dependencies = [ +# "tomli ; python_full_version < '3.11'", +# "tomli-w", +# ] +# /// import re import sys diff --git a/yt/_maintenance/backports.py b/yt/_maintenance/backports.py index f3ee778243..78577af3e4 100644 --- a/yt/_maintenance/backports.py +++ b/yt/_maintenance/backports.py @@ -52,7 +52,7 @@ def __new__(cls, *values): if len(values) == 3: # check that errors argument is a string if not isinstance(values[2], str): - raise TypeError("errors must be a string, not %r" % (values[2])) + raise TypeError("errors must be a string, not %r" % (values[2])) # noqa: UP031 value = str(*values) member = str.__new__(cls, value) member._value_ = value diff --git a/yt/_maintenance/ipython_compat.py b/yt/_maintenance/ipython_compat.py new file mode 100644 index 0000000000..e405f44b3b --- /dev/null +++ b/yt/_maintenance/ipython_compat.py @@ -0,0 +1,31 @@ +from importlib.metadata import version +from importlib.util import find_spec + +from packaging.version import Version + +__all__ = [ + "IS_IPYTHON", + "IPYWIDGETS_ENABLED", +] + +IS_IPYTHON: bool +HAS_IPYWIDGETS_GE_8: bool +IPYWIDGETS_ENABLED: bool + + +try: + # this name is only defined if running within ipython/jupyter + __IPYTHON__ # type: ignore [name-defined] # noqa: B018 +except NameError: + IS_IPYTHON = False +else: + IS_IPYTHON = True + + +HAS_IPYWIDGETS_GE_8 = ( + Version(version("ipywidgets")) >= Version("8.0.0") + if find_spec("ipywidgets") is not None + else False +) + +IPYWIDGETS_ENABLED = IS_IPYTHON and HAS_IPYWIDGETS_GE_8 diff --git a/yt/_maintenance/numpy2_compat.py b/yt/_maintenance/numpy2_compat.py index 5f53beb91b..3cc1f01aa1 100644 --- a/yt/_maintenance/numpy2_compat.py +++ b/yt/_maintenance/numpy2_compat.py @@ -9,4 +9,4 @@ if NUMPY_VERSION >= Version("2.0.0dev0"): from numpy import trapezoid as trapezoid # type: ignore [attr-defined] else: - from numpy import trapz as trapezoid # noqa: F401 + from numpy import trapz as trapezoid # type: ignore [attr-defined] # noqa: F401 diff --git a/yt/config.py b/yt/config.py index 758454a4f9..8ae7364145 100644 --- a/yt/config.py +++ b/yt/config.py @@ -45,7 +45,6 @@ "ray_tracing_engine": "yt", "internals": { "within_testing": False, - "within_pytest": False, "parallel": False, "strict_requires": False, "global_parallel_rank": 0, diff --git a/yt/data_objects/analyzer_objects.py b/yt/data_objects/analyzer_objects.py index b6824b6249..b186ac1610 100644 --- a/yt/data_objects/analyzer_objects.py +++ b/yt/data_objects/analyzer_objects.py @@ -1,7 +1,11 @@ import inspect +import sys from yt.utilities.object_registries import analysis_task_registry +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + class AnalysisTask: def __init_subclass__(cls, *args, **kwargs): @@ -14,7 +18,7 @@ def __init__(self, *args, **kwargs): # does not override if len(args) + len(kwargs) != len(self._params): raise RuntimeError - self.__dict__.update(zip(self._params, args)) + self.__dict__.update(zip(self._params, args, strict=False)) self.__dict__.update(kwargs) def __repr__(self): @@ -67,7 +71,7 @@ class QuantityProxy(AnalysisTask): def __repr__(self): # Stolen from YTDataContainer.__repr__ s = 
f"{self.__class__.__name__}: " - s += ", ".join(["%s" % list(self.args)]) + s += ", ".join([str(list(self.args))]) s += ", ".join(f"{k}={v}" for k, v in self.kwargs.items()) return s diff --git a/yt/data_objects/construction_data_containers.py b/yt/data_objects/construction_data_containers.py index adcf01532c..38deeb9bbc 100644 --- a/yt/data_objects/construction_data_containers.py +++ b/yt/data_objects/construction_data_containers.py @@ -1,6 +1,7 @@ import fileinput import io import os +import sys import warnings import zipfile from functools import partial, wraps @@ -63,6 +64,9 @@ ) from yt.visualization.color_maps import get_colormap_lut +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + class YTStreamline(YTSelectionContainer1D): """ @@ -150,7 +154,9 @@ def _get_cut_mask(self, grid): mask = np.zeros(points_in_grid.sum(), dtype="int64") dts = np.zeros(points_in_grid.sum(), dtype="float64") ts = np.zeros(points_in_grid.sum(), dtype="float64") - for mi, (i, pos) in enumerate(zip(pids, self.positions[points_in_grid])): + for mi, (i, pos) in enumerate( + zip(pids, self.positions[points_in_grid], strict=True) + ): if not points_in_grid[i]: continue ci = ((pos - grid.LeftEdge) / grid.dds).astype("int64") @@ -649,6 +655,8 @@ class YTCoveringGrid(YTSelectionContainer3D): level : int The resolution level data to which data will be gridded. Level 0 is the root grid dx for that dataset. + (The grid resolution will be simulation size / 2**level along + each grid axis.) left_edge : array_like The left edge of the region to be extracted. Specify units by supplying a YTArray, otherwise code length units are assumed. @@ -784,7 +792,10 @@ def to_xarray(self, fields=None): def icoords(self): ic = np.indices(self.ActiveDimensions).astype("int64") return np.column_stack( - [i.ravel() + gi for i, gi in zip(ic, self.get_global_startindex())] + [ + i.ravel() + gi + for i, gi in zip(ic, self.get_global_startindex(), strict=True) + ] ) @property @@ -880,10 +891,11 @@ def get_data(self, fields=None): fill, gen, part, alias = self._split_fields(fields_to_get) except NeedsGridType as e: if self._num_ghost_zones == 0: + num_ghost_zones = self._num_ghost_zones raise RuntimeError( "Attempting to access a field that needs ghost zones, but " - "num_ghost_zones = %s. You should create the covering grid " - "with nonzero num_ghost_zones." % self._num_ghost_zones + f"{num_ghost_zones = }. You should create the covering grid " + "with nonzero num_ghost_zones." 
) from e else: raise @@ -994,14 +1006,15 @@ def _fill_sph_particles(self, fields): smoothing_style = getattr(self.ds, "sph_smoothing_style", "scatter") normalize = getattr(self.ds, "use_sph_normalization", True) + kernel_name = getattr(self.ds, "kernel_name", "cubic") bounds, size = self._get_grid_bounds_size() period = self.ds.coordinates.period.copy() if hasattr(period, "in_units"): period = period.in_units("code_length").d - # TODO maybe there is a better way of handling this - is_periodic = int(any(self.ds.periodicity)) + # check periodicity per dimension + is_periodic = self.ds.periodicity if smoothing_style == "scatter": for field in fields: @@ -1035,6 +1048,7 @@ def _fill_sph_particles(self, fields): pbar=pbar, check_period=is_periodic, period=period, + kernel_name=kernel_name, ) if normalize: pixelize_sph_kernel_arbitrary_grid( @@ -1050,6 +1064,7 @@ def _fill_sph_particles(self, fields): pbar=pbar, check_period=is_periodic, period=period, + kernel_name=kernel_name, ) if normalize: @@ -1121,7 +1136,7 @@ def _fill_fields(self, fields): if self.comm.size > 1: for i in range(len(fields)): output_fields[i] = self.comm.mpi_allreduce(output_fields[i], op="sum") - for field, v in zip(fields, output_fields): + for field, v in zip(fields, output_fields, strict=True): fi = self.ds._get_field_info(field) self[field] = self.ds.arr(v, fi.units) @@ -1220,7 +1235,7 @@ def write_to_gdf(self, gdf_path, fields, nprocs=1, field_units=None, **kwargs): data[field] = (self[field].in_units(units).v, units) le = self.left_edge.v re = self.right_edge.v - bbox = np.array([[l, r] for l, r in zip(le, re)]) + bbox = np.array([[l, r] for l, r in zip(le, re, strict=True)]) ds = load_uniform_grid( data, self.ActiveDimensions, @@ -1411,7 +1426,7 @@ class YTSmoothedCoveringGrid(YTCoveringGrid): filename = None _min_level = None - @wraps(YTCoveringGrid.__init__) + @wraps(YTCoveringGrid.__init__) # type: ignore [misc] def __init__(self, *args, **kwargs): ds = kwargs["ds"] self._base_dx = ( @@ -1525,7 +1540,7 @@ def _fill_fields(self, fields): stacklevel=1, ) mylog.debug("Caught %d runtime errors.", runtime_errors_count) - for field, v in zip(fields, ls.fields): + for field, v in zip(fields, ls.fields, strict=True): if self.level > 0: v = v[1:-1, 1:-1, 1:-1] fi = self.ds._get_field_info(field) diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index 26763d5972..dfc6ebd487 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -1,4 +1,5 @@ import abc +import sys import weakref from collections import defaultdict from contextlib import contextmanager @@ -28,13 +29,16 @@ from yt.utilities.on_demand_imports import _firefly as firefly from yt.utilities.parameter_file_storage import ParameterFileStore +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + if TYPE_CHECKING: from yt.data_objects.static_output import Dataset def sanitize_weight_field(ds, field, weight): - field_object = ds._get_field_info(field) if weight is None: + field_object = ds._get_field_info(field) if field_object.sampling_type == "particle": if field_object.name[0] == "gas": ptype = ds._sph_ptypes[0] @@ -87,7 +91,7 @@ def __init__(self, ds: Optional["Dataset"], field_parameters) -> None: # constructor, in which case it will override the default. # This code ensures it is never not set. 
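A minimal standalone sketch of the zip-backport pattern the hunks above apply throughout this patch (assuming only that yt._maintenance.backports exports a PEP 618-style zip; the field/unit lists here are illustrative):

import sys

# On Python < 3.10 the builtin zip() has no strict= keyword (PEP 618),
# so shadow it with yt's backport; on 3.10+ the builtin is used as-is.
if sys.version_info < (3, 10):
    from yt._maintenance.backports import zip

fields = ["density", "temperature"]
units = ["g/cm**3", "K"]
# strict=True raises ValueError if the two iterables drift out of sync;
# strict=False is kept at call sites where silent truncation is intended.
paired = dict(zip(fields, units, strict=True))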
- self.ds: "Dataset" + self.ds: Dataset if ds is not None: self.ds = ds else: @@ -797,7 +801,7 @@ def create_firefly_object( # tuples containing some sort of special "any" ParticleGroup unambiguous_fields_to_include = [] unambiguous_fields_units = [] - for field, field_unit in zip(fields_to_include, fields_units): + for field, field_unit in zip(fields_to_include, fields_units, strict=True): if isinstance(field, tuple): # skip tuples, they'll be checked with _determine_fields unambiguous_fields_to_include.append(field) @@ -849,7 +853,7 @@ def create_firefly_object( field_names = [] ## explicitly go after the fields we want - for field, units in zip(fields_to_include, fields_units): + for field, units in zip(fields_to_include, fields_units, strict=True): ## Only interested in fields with the current particle type, ## whether that means general fields or field tuples ftype, fname = field diff --git a/yt/data_objects/derived_quantities.py b/yt/data_objects/derived_quantities.py index 0ca5eaabb0..8bc41c0474 100644 --- a/yt/data_objects/derived_quantities.py +++ b/yt/data_objects/derived_quantities.py @@ -1,3 +1,5 @@ +import sys + import numpy as np from yt.funcs import camelcase_to_underscore, iter_fields @@ -11,6 +13,9 @@ from yt.utilities.physical_constants import gravitational_constant_cgs from yt.utilities.physical_ratios import HUGE +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + def get_position_fields(field, data): axis_names = [data.ds.coordinates.axis_name[num] for num in [0, 1, 2]] @@ -415,7 +420,7 @@ def __call__(self, fields, weight): fields = list(iter_fields(fields)) units = [self.data_source.ds._get_field_info(field).units for field in fields] rv = super().__call__(fields, weight) - rv = [self.data_source.ds.arr(v, u) for v, u in zip(rv, units)] + rv = [self.data_source.ds.arr(v, u) for v, u in zip(rv, units, strict=True)] if len(rv) == 1: rv = rv[0] return rv @@ -431,7 +436,7 @@ def process_chunk(self, data, fields, weight): my_var2s = [ (data[weight].d * (data[field].d - my_mean) ** 2).sum(dtype=np.float64) / my_weight - for field, my_mean in zip(fields, my_means) + for field, my_mean in zip(fields, my_means, strict=True) ] return my_means + my_var2s + [my_weight] @@ -623,7 +628,7 @@ def reduce_intermediate(self, values): # The values get turned into arrays here. return [ self.data_source.ds.arr([mis.min(), mas.max()]) - for mis, mas in zip(values[::2], values[1::2]) + for mis, mas in zip(values[::2], values[1::2], strict=True) ] diff --git a/yt/data_objects/level_sets/clump_info_items.py b/yt/data_objects/level_sets/clump_info_items.py index 798aecc30b..d0c727f310 100644 --- a/yt/data_objects/level_sets/clump_info_items.py +++ b/yt/data_objects/level_sets/clump_info_items.py @@ -109,7 +109,7 @@ def _distance_to_main_clump(clump, units="pc"): distance = np.sqrt(((master_com - my_com) ** 2).sum()) distance.convert_to_units("pc") return ( - "Distance from master center of mass: %%.6e %s." 
% units, + f"Distance from master center of mass: %.6e {units}.", distance.in_units(units), ) diff --git a/yt/data_objects/level_sets/tests/test_clump_finding.py b/yt/data_objects/level_sets/tests/test_clump_finding.py index e44bd87033..bd7059f8c3 100644 --- a/yt/data_objects/level_sets/tests/test_clump_finding.py +++ b/yt/data_objects/level_sets/tests/test_clump_finding.py @@ -1,5 +1,6 @@ import os import shutil +import sys import tempfile import numpy as np @@ -12,6 +13,9 @@ from yt.testing import requires_file, requires_module from yt.utilities.answer_testing.framework import data_dir_load +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + def test_clump_finding(): n_c = 8 @@ -132,7 +136,7 @@ def test_clump_tree_save(): it2 = np.argsort(mt2).astype("int64") assert_array_equal(mt1[it1], mt2[it2]) - for i1, i2 in zip(it1, it2): + for i1, i2 in zip(it1, it2, strict=True): ct1 = t1[i1] ct2 = t2[i2] assert_array_equal(ct1["gas", "density"], ct2["grid", "density"]) @@ -191,5 +195,5 @@ def _also_density(field, data): leaf_clumps_1 = master_clump_1.leaves leaf_clumps_2 = master_clump_2.leaves - for c1, c2 in zip(leaf_clumps_1, leaf_clumps_2): + for c1, c2 in zip(leaf_clumps_1, leaf_clumps_2, strict=True): assert_array_equal(c1["gas", "density"], c2["gas", "density"]) diff --git a/yt/data_objects/profiles.py b/yt/data_objects/profiles.py index f954620543..5aac6b0b6d 100644 --- a/yt/data_objects/profiles.py +++ b/yt/data_objects/profiles.py @@ -1,3 +1,5 @@ +import sys + import numpy as np from more_itertools import collapse @@ -23,6 +25,9 @@ parallel_objects, ) +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + def _sanitize_min_max_units(amin, amax, finfo, registry): # returns a copy of amin and amax, converted to finfo's output units @@ -217,7 +222,7 @@ def _filter(self, bin_fields): # cut_points is set to be everything initially, but # we also want to apply a filtering based on min/max pfilter = np.ones(bin_fields[0].shape, dtype="bool") - for (mi, ma), data in zip(self.bounds, bin_fields): + for (mi, ma), data in zip(self.bounds, bin_fields, strict=True): pfilter &= data > mi pfilter &= data < ma return pfilter, [data[pfilter] for data in bin_fields] @@ -1313,7 +1318,11 @@ def create_profile( data_source.ds.field_info[f].sampling_type == "local" for f in bin_fields + fields ] - is_local_or_pfield = [pf or lf for (pf, lf) in zip(is_pfield, is_local)] + if wf is not None: + is_local.append(wf.sampling_type == "local") + is_local_or_pfield = [ + pf or lf for (pf, lf) in zip(is_pfield, is_local, strict=True) + ] if not all(is_local_or_pfield): raise YTIllDefinedProfile( bin_fields, data_source._determine_fields(fields), wf, is_pfield @@ -1370,7 +1379,7 @@ def create_profile( if extrema is None or not any(collapse(extrema.values())): ex = [ data_source.quantities["Extrema"](f, non_zero=l) - for f, l in zip(bin_fields, logs) + for f, l in zip(bin_fields, logs, strict=True) ] # pad extrema by epsilon so cells at bin edges are not excluded for i, (mi, ma) in enumerate(ex): @@ -1456,7 +1465,7 @@ def create_profile( o_bins.append(field_obin) args = [data_source] - for f, n, (mi, ma), l in zip(bin_fields, n_bins, ex, logs): + for f, n, (mi, ma), l in zip(bin_fields, n_bins, ex, logs, strict=True): if mi <= 0 and l: raise YTIllDefinedBounds(mi, ma) args += [f, n, mi, ma, l] @@ -1464,7 +1473,7 @@ def create_profile( if cls is ParticleProfile: kwargs["deposition"] = deposition if override_bins is not None: - for o_bin, ax in zip(o_bins, ["x", "y", 
"z"]): + for o_bin, ax in zip(o_bins, ["x", "y", "z"], strict=False): kwargs[f"override_bins_{ax}"] = o_bin obj = cls(*args, **kwargs) obj.accumulation = accumulation diff --git a/yt/data_objects/region_expression.py b/yt/data_objects/region_expression.py index 821e291bd4..7187c5606e 100644 --- a/yt/data_objects/region_expression.py +++ b/yt/data_objects/region_expression.py @@ -166,7 +166,7 @@ def _create_region(self, bounds_tuple): if d is not None: d = int(d) dims[ax] = d - center = [(cl + cr) / 2.0 for cl, cr in zip(left_edge, right_edge)] + center = (left_edge + right_edge) / 2.0 if None not in dims: return self.ds.arbitrary_grid(left_edge, right_edge, dims) return self.ds.region(center, left_edge, right_edge) diff --git a/yt/data_objects/selection_objects/slices.py b/yt/data_objects/selection_objects/slices.py index 9bb2965604..fd510737f3 100644 --- a/yt/data_objects/selection_objects/slices.py +++ b/yt/data_objects/selection_objects/slices.py @@ -6,6 +6,7 @@ ) from yt.data_objects.static_output import Dataset from yt.funcs import ( + fix_length, is_sequence, iter_fields, validate_3d_array, @@ -78,7 +79,7 @@ def __init__( validate_object(data_source, YTSelectionContainer) YTSelectionContainer2D.__init__(self, axis, ds, field_parameters, data_source) self._set_center(center) - self.coord = coord + self.coord = fix_length(coord, ds) def _generate_container_field(self, field): xax = self.ds.coordinates.x_axis[self.axis] diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index 5c546a1849..80a7c5981d 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -13,16 +13,16 @@ from functools import cached_property from importlib.util import find_spec from stat import ST_CTIME -from typing import Any, Literal, Optional, Union +from typing import TYPE_CHECKING, Any, Literal, Optional, Union import numpy as np import unyt as un from more_itertools import unzip -from sympy import Symbol from unyt import Unit, UnitSystem, unyt_quantity from unyt.exceptions import UnitConversionError, UnitParseError from yt._maintenance.deprecation import issue_deprecation_warning +from yt._maintenance.ipython_compat import IPYWIDGETS_ENABLED from yt._typing import ( AnyFieldKey, AxisOrder, @@ -74,6 +74,9 @@ from yt.utilities.parallel_tools.parallel_analysis_interface import parallel_root_only from yt.utilities.parameter_file_storage import NoParameterShelf, ParameterFileStore +if TYPE_CHECKING: + from sympy import Symbol + if sys.version_info >= (3, 11): from typing import assert_never else: @@ -119,10 +122,7 @@ def __init__(self, display_array=False): # We can assume that ipywidgets will not be *added* to the system # during the course of execution, and if it is, we will not wrap the # array. 
- if display_array and find_spec("ipywidgets") is not None: - self.display_array = True - else: - self.display_array = False + self.display_array = display_array and IPYWIDGETS_ENABLED def __get__(self, instance, owner): return self.data.get(instance, None) @@ -1395,7 +1395,7 @@ def set_units(self): new_unit, my_u.base_value / (1 + self.current_redshift), dimensions.length, - "\\rm{%s}/(1+z)" % my_unit, + f"\\rm{{{my_unit}}}/(1+z)", prefixable=True, ) self.unit_registry.modify("a", 1 / (1 + self.current_redshift)) diff --git a/yt/data_objects/tests/test_covering_grid.py b/yt/data_objects/tests/test_covering_grid.py index 376b7d3b0d..ed59776409 100644 --- a/yt/data_objects/tests/test_covering_grid.py +++ b/yt/data_objects/tests/test_covering_grid.py @@ -1,3 +1,5 @@ +import sys + import numpy as np from numpy.testing import assert_almost_equal, assert_array_equal, assert_equal @@ -11,6 +13,10 @@ ) from yt.units import kpc +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + + # cylindrical data for covering_grid test cyl_2d = "WDMerger_hdf5_chk_1000/WDMerger_hdf5_chk_1000.hdf5" cyl_3d = "MHD_Cyl3d_hdf5_plt_cnt_0100/MHD_Cyl3d_hdf5_plt_cnt_0100.hdf5" @@ -354,7 +360,7 @@ def test_arbitrary_grid_edge(): [1.0, 1.0, 1.0] * kpc, ] - for le, re, le_ans, re_ans in zip(ledge, redge, ledge_ans, redge_ans): + for le, re, le_ans, re_ans in zip(ledge, redge, ledge_ans, redge_ans, strict=True): ag = ds.arbitrary_grid(left_edge=le, right_edge=re, dims=dims) assert np.array_equal(ag.left_edge, le_ans) assert np.array_equal(ag.right_edge, re_ans) diff --git a/yt/data_objects/tests/test_derived_quantities.py b/yt/data_objects/tests/test_derived_quantities.py index 730ad06c60..93e343282b 100644 --- a/yt/data_objects/tests/test_derived_quantities.py +++ b/yt/data_objects/tests/test_derived_quantities.py @@ -1,3 +1,5 @@ +import sys + import numpy as np from numpy.testing import assert_almost_equal, assert_equal @@ -11,6 +13,9 @@ requires_file, ) +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + def setup_module(): from yt.config import ytcfg @@ -183,10 +188,14 @@ def test_in_memory_sph_derived_quantities(): assert_equal(com, [1 / 7, (1 + 2) / 7, (1 + 2 + 3) / 7]) ex = ad.quantities.extrema([("io", "x"), ("io", "y"), ("io", "z")]) - for fex, ans in zip(ex, [[0, 1], [0, 2], [0, 3]]): + for fex, ans in zip(ex, [[0, 1], [0, 2], [0, 3]], strict=True): assert_equal(fex, ans) - for d, v, l in zip("xyz", [1, 2, 3], [[1, 0, 0], [0, 2, 0], [0, 0, 3]]): + for d, v, l in [ + ("x", 1, [1, 0, 0]), + ("y", 2, [0, 2, 0]), + ("z", 3, [0, 0, 3]), + ]: max_d, x, y, z = ad.quantities.max_location(("io", d)) assert_equal(max_d, v) assert_equal([x, y, z], l) diff --git a/yt/data_objects/tests/test_disks.py b/yt/data_objects/tests/test_disks.py index 1e2abebe50..8d87ed1d07 100644 --- a/yt/data_objects/tests/test_disks.py +++ b/yt/data_objects/tests/test_disks.py @@ -1,5 +1,4 @@ -from nose.tools import assert_raises -from numpy.testing import assert_equal +import pytest from yt import YTQuantity from yt.testing import fake_random_ds @@ -10,31 +9,37 @@ def test_bad_disk_input(): ds = fake_random_ds(16) # Test invalid 3d array - with assert_raises(TypeError) as ex: + with pytest.raises( + TypeError, + match=r"^Expected an array of size \(3,\), received 'list' of length 4$", + ): ds.disk(ds.domain_center, [0, 0, 1, 1], (10, "kpc"), (20, "kpc")) - desired = "Expected an array of size (3,), received 'list' of length 4" - assert_equal(str(ex.exception), desired) # Test invalid float - with 
assert_raises(TypeError) as ex: + with pytest.raises( + TypeError, + match=( + r"^Expected a numeric value \(or size-1 array\), " + r"received 'unyt.array.unyt_array' of length 3$" + ), + ): ds.disk(ds.domain_center, [0, 0, 1], ds.domain_center, (20, "kpc")) - desired = ( - "Expected a numeric value (or size-1 array)," - " received 'unyt.array.unyt_array' of length 3" - ) - assert_equal(str(ex.exception), desired) # Test invalid float - with assert_raises(TypeError) as ex: + with pytest.raises( + TypeError, + match=( + r"^Expected a numeric value \(or tuple of format \(float, String\)\), " + r"received an inconsistent tuple '\(10, 10\)'.$" + ), + ): ds.disk(ds.domain_center, [0, 0, 1], (10, 10), (20, "kpc")) - desired = ( - "Expected a numeric value (or tuple of format (float, String))," - " received an inconsistent tuple '(10, 10)'." - ) - assert_equal(str(ex.exception), desired) # Test invalid iterable - with assert_raises(TypeError) as ex: + with pytest.raises( + TypeError, + match=r"^Expected an iterable object, received 'unyt\.array\.unyt_quantity'$", + ): ds.disk( ds.domain_center, [0, 0, 1], @@ -42,18 +47,16 @@ def test_bad_disk_input(): (20, "kpc"), fields=YTQuantity(1, "kpc"), ) - desired = "Expected an iterable object, received 'unyt.array.unyt_quantity'" - assert_equal(str(ex.exception), desired) # Test invalid object - with assert_raises(TypeError) as ex: + with pytest.raises( + TypeError, + match=( + r"^Expected an object of 'yt\.data_objects\.static_output\.Dataset' type, " + r"received 'yt\.data_objects\.selection_objects\.region\.YTRegion'$" + ), + ): ds.disk(ds.domain_center, [0, 0, 1], (10, "kpc"), (20, "kpc"), ds=ds.all_data()) - desired = ( - "Expected an object of 'yt.data_objects.static_output.Dataset' " - "type, received " - "'yt.data_objects.selection_objects.region.YTRegion'" - ) - assert_equal(str(ex.exception), desired) # Test valid disk ds.disk(ds.domain_center, [0, 0, 1], (10, "kpc"), (20, "kpc")) diff --git a/yt/data_objects/tests/test_sph_data_objects.py b/yt/data_objects/tests/test_sph_data_objects.py index 990759af57..8cf769f224 100644 --- a/yt/data_objects/tests/test_sph_data_objects.py +++ b/yt/data_objects/tests/test_sph_data_objects.py @@ -64,6 +64,16 @@ def test_slice(): } +def test_slice_to_frb(): + ds = fake_sph_orientation_ds() + frb = ds.slice(0, 0.5).to_frb(ds.domain_width[0], (64, 64)) + ref_vals = frb["gas", "density"] + for center in ((0.5, "code_length"), (0.5, "cm"), ds.quan(0.5, "code_length")): + frb = ds.slice(0, center).to_frb(ds.domain_width[0], (64, 64)) + vals = frb["gas", "density"] + assert_equal(vals, ref_vals) + + def test_region(): ds = fake_sph_orientation_ds() for (left_edge, right_edge), answer in REGION_ANSWERS.items(): @@ -92,7 +102,7 @@ def test_periodic_region(): for y in coords: for z in coords: center = np.array([x, y, z]) - for n, w in zip((8, 27), (1.0, 2.0)): + for n, w in [(8, 1.0), (27, 2.0)]: le = center - 0.5 * w re = center + 0.5 * w box = ds.box(le, re) diff --git a/yt/fields/derived_field.py b/yt/fields/derived_field.py index 42b6ff6636..cfb1c4d59b 100644 --- a/yt/fields/derived_field.py +++ b/yt/fields/derived_field.py @@ -1,6 +1,7 @@ import contextlib import inspect import re +import sys from collections.abc import Iterable from typing import Optional, Union @@ -25,6 +26,9 @@ NeedsProperty, ) +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + def TranslationFunc(field_name): def _TranslationFunc(field, data): @@ -255,7 +259,7 @@ def _get_needed_parameters(self, fd): else: 
params.extend(val.parameters) values.extend([fd.get_field_parameter(fp) for fp in val.parameters]) - return dict(zip(params, values)), permute_params + return dict(zip(params, values, strict=True)), permute_params _unit_registry = None @@ -303,7 +307,7 @@ def get_label(self, projected=False): name = self.display_name # Start with the field name - data_label = r"$\rm{%s}" % name + data_label = rf"$\rm{{{name}}}" # Grab the correct units if projected: diff --git a/yt/fields/field_type_container.py b/yt/fields/field_type_container.py index 9a21be7222..3353138d31 100644 --- a/yt/fields/field_type_container.py +++ b/yt/fields/field_type_container.py @@ -7,6 +7,7 @@ import weakref from functools import cached_property +from yt._maintenance.ipython_compat import IPYWIDGETS_ENABLED from yt.fields.derived_field import DerivedField @@ -59,22 +60,24 @@ def __contains__(self, obj): return ob in self.field_types - def _ipython_display_(self): - import ipywidgets - from IPython.display import display + if IPYWIDGETS_ENABLED: - fnames = [] - children = [] - for ftype in sorted(self.field_types): - fnc = getattr(self, ftype) - children.append(ipywidgets.Output()) - with children[-1]: - display(fnc) - fnames.append(ftype) - tabs = ipywidgets.Tab(children=children) - for i, n in enumerate(fnames): - tabs.set_title(i, n) - display(tabs) + def _ipython_display_(self): + import ipywidgets + from IPython.display import display + + fnames = [] + children = [] + for ftype in sorted(self.field_types): + fnc = getattr(self, ftype) + children.append(ipywidgets.Output()) + with children[-1]: + display(fnc) + fnames.append(ftype) + tabs = ipywidgets.Tab(children=children) + for i, n in enumerate(fnames): + tabs.set_title(i, n) + display(tabs) class FieldNameContainer: @@ -112,46 +115,55 @@ def __contains__(self, obj): return True return False - def _ipython_display_(self): - import ipywidgets - from IPython.display import Markdown, display - - names = dir(self) - names.sort() - - def change_field(_ftype, _box, _var_window): - def _change_field(event): - fobj = getattr(_ftype, event["new"]) - _box.clear_output() - with _box: - display( - Markdown( - data="```python\n" - + textwrap.dedent(fobj.get_source()) - + "\n```" + if IPYWIDGETS_ENABLED: + # for discussion of this class-level conditional: https://github.com/yt-project/yt/pull/4941 + + def _ipython_display_(self): + import ipywidgets + from IPython.display import Markdown, display + + names = dir(self) + names.sort() + + def change_field(_ftype, _box, _var_window): + def _change_field(event): + fobj = getattr(_ftype, event["new"]) + _box.clear_output() + with _box: + display( + Markdown( + data="```python\n" + + textwrap.dedent(fobj.get_source()) + + "\n```" + ) ) - ) - values = inspect.getclosurevars(fobj._function).nonlocals - _var_window.value = _fill_values(values) + values = inspect.getclosurevars(fobj._function).nonlocals + _var_window.value = _fill_values(values) - return _change_field + return _change_field - flist = ipywidgets.Select(options=names, layout=ipywidgets.Layout(height="95%")) - source = ipywidgets.Output(layout=ipywidgets.Layout(width="100%", height="9em")) - var_window = ipywidgets.HTML(value="Empty") - var_box = ipywidgets.Box( - layout=ipywidgets.Layout(width="100%", height="100%", overflow_y="scroll") - ) - var_box.children = [var_window] - ftype_tabs = ipywidgets.Tab( - children=[source, var_box], - layout=ipywidgets.Layout(flex="2 1 auto", width="auto", height="95%"), - ) - ftype_tabs.set_title(0, "Source") - ftype_tabs.set_title(1, 
"Variables") - flist.observe(change_field(self, source, var_window), "value") - display( - ipywidgets.HBox( - [flist, ftype_tabs], layout=ipywidgets.Layout(height="14em") + flist = ipywidgets.Select( + options=names, layout=ipywidgets.Layout(height="95%") + ) + source = ipywidgets.Output( + layout=ipywidgets.Layout(width="100%", height="9em") + ) + var_window = ipywidgets.HTML(value="Empty") + var_box = ipywidgets.Box( + layout=ipywidgets.Layout( + width="100%", height="100%", overflow_y="scroll" + ) + ) + var_box.children = [var_window] + ftype_tabs = ipywidgets.Tab( + children=[source, var_box], + layout=ipywidgets.Layout(flex="2 1 auto", width="auto", height="95%"), + ) + ftype_tabs.set_title(0, "Source") + ftype_tabs.set_title(1, "Variables") + flist.observe(change_field(self, source, var_window), "value") + display( + ipywidgets.HBox( + [flist, ftype_tabs], layout=ipywidgets.Layout(height="14em") + ) ) - ) diff --git a/yt/fields/magnetic_field.py b/yt/fields/magnetic_field.py index d9e7cb5792..8eab02ff3d 100644 --- a/yt/fields/magnetic_field.py +++ b/yt/fields/magnetic_field.py @@ -10,6 +10,9 @@ from .field_plugin_registry import register_field_plugin +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + if sys.version_info >= (3, 11): from typing import assert_never else: @@ -330,7 +333,7 @@ def _mag_field(field, data): return _mag_field - for ax, fd in zip(registry.ds.coordinates.axis_order, ds_fields): + for ax, fd in zip(registry.ds.coordinates.axis_order, ds_fields, strict=False): registry.add_field( (ftype, f"magnetic_field_{ax}"), sampling_type=sampling_type, diff --git a/yt/fields/particle_fields.py b/yt/fields/particle_fields.py index c52bd2a0b4..e1ab8db2e8 100644 --- a/yt/fields/particle_fields.py +++ b/yt/fields/particle_fields.py @@ -110,7 +110,7 @@ def particle_count(field, data): function=particle_count, validators=[ValidateSpatial()], units="", - display_name=r"\mathrm{%s Count}" % ptype_dn, + display_name=rf"\mathrm{{{ptype_dn} Count}}", ) def particle_mass(field, data): @@ -125,7 +125,7 @@ def particle_mass(field, data): sampling_type="cell", function=particle_mass, validators=[ValidateSpatial()], - display_name=r"\mathrm{%s Mass}" % ptype_dn, + display_name=rf"\mathrm{{{ptype_dn} Mass}}", units=unit_system["mass"], ) @@ -144,7 +144,7 @@ def particle_density(field, data): sampling_type="cell", function=particle_density, validators=[ValidateSpatial()], - display_name=r"\mathrm{%s Density}" % ptype_dn, + display_name=rf"\mathrm{{{ptype_dn} Density}}", units=unit_system["density"], ) @@ -160,7 +160,7 @@ def particle_cic(field, data): sampling_type="cell", function=particle_cic, validators=[ValidateSpatial()], - display_name=r"\mathrm{%s CIC Density}" % ptype_dn, + display_name=rf"\mathrm{{{ptype_dn} CIC Density}}", units=unit_system["density"], ) @@ -184,7 +184,7 @@ def _deposit_field(field, data): return _deposit_field for ax in "xyz": - for method, name in zip(("cic", "sum"), ("cic", "nn")): + for method, name in [("cic", "cic"), ("sum", "nn")]: function = _get_density_weighted_deposit_field( f"particle_velocity_{ax}", "code_velocity", method ) @@ -197,7 +197,7 @@ def _deposit_field(field, data): validators=[ValidateSpatial(0)], ) - for method, name in zip(("cic", "sum"), ("cic", "nn")): + for method, name in [("cic", "cic"), ("sum", "nn")]: function = _get_density_weighted_deposit_field("age", "code_time", method) registry.add_field( ("deposit", ("%s_" + name + "_age") % (ptype)), diff --git a/yt/fields/vector_operations.py 
b/yt/fields/vector_operations.py index 0c3cbe0a2f..860cf076ea 100644 --- a/yt/fields/vector_operations.py +++ b/yt/fields/vector_operations.py @@ -19,6 +19,9 @@ from .derived_field import NeedsParameter, ValidateParameter, ValidateSpatial +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + if sys.version_info >= (3, 11): from typing import assert_never else: @@ -133,7 +136,7 @@ def _los_field(field, data): function=_los_field, units=field_units, validators=validators, - display_name=r"\mathrm{Line of Sight %s}" % basename.capitalize(), + display_name=rf"\mathrm{{Line of Sight {basename.capitalize()}}}", ) @@ -362,7 +365,7 @@ def _divergence(field, data): ds = div_fac * just_one(data["index", "dz"]) f += data[zn[0], f"relative_{zn[1]}"][1:-1, 1:-1, sl_right] / ds f -= data[zn[0], f"relative_{zn[1]}"][1:-1, 1:-1, sl_left] / ds - new_field = data.ds.arr(np.zeros(data[xn].shape, dtype=np.float64), f.units) + new_field = data.ds.arr(np.zeros(data[xn].shape, dtype="f8"), str(f.units)) new_field[1:-1, 1:-1, 1:-1] = f return new_field @@ -676,7 +679,7 @@ def atleast_4d(array): ) i_i, j_i, k_i = np.mgrid[0:3, 0:3, 0:3] - for i, j, k in zip(i_i.ravel(), j_i.ravel(), k_i.ravel()): + for i, j, k in zip(i_i.ravel(), j_i.ravel(), k_i.ravel(), strict=True): sl = ( slice(i, nx - (2 - i)), slice(j, ny - (2 - j)), diff --git a/yt/frontends/__init__.py b/yt/frontends/__init__.py index 989c1deea5..4a5d6a2ff7 100644 --- a/yt/frontends/__init__.py +++ b/yt/frontends/__init__.py @@ -1,12 +1,13 @@ __all__ = [ "adaptahop", "ahf", + "amrex", "amrvac", "art", "artio", "athena", "athena_pp", - "boxlib", + "boxlib", # the boxlib frontend is deprecated, use 'amrex' "cf_radial", "chimera", "chombo", diff --git a/yt/frontends/adaptahop/data_structures.py b/yt/frontends/adaptahop/data_structures.py index d36bcff4a2..e8cb853568 100644 --- a/yt/frontends/adaptahop/data_structures.py +++ b/yt/frontends/adaptahop/data_structures.py @@ -107,7 +107,7 @@ def _guess_headers_from_file(self, filename) -> None: pass if not ok: - raise OSError("Could not read headers from file %s" % filename) + raise OSError(f"Could not read headers from file {filename}") istart = fpu.tell() fpu.seek(0, 2) @@ -130,7 +130,7 @@ def _guess_headers_from_file(self, filename) -> None: continue if not ok: - raise OSError("Could not guess fields from file %s" % filename) + raise OSError(f"Could not guess fields from file {filename}") self._header_attributes = header_attributes self._halo_attributes = attributes diff --git a/yt/frontends/boxlib/definitions.py b/yt/frontends/amrex/__init__.py similarity index 100% rename from yt/frontends/boxlib/definitions.py rename to yt/frontends/amrex/__init__.py diff --git a/yt/frontends/amrex/api.py b/yt/frontends/amrex/api.py new file mode 100644 index 0000000000..334cc8bb63 --- /dev/null +++ b/yt/frontends/amrex/api.py @@ -0,0 +1,24 @@ +from . 
import tests +from .data_structures import ( + AMReXDataset, + AMReXHierarchy, + BoxlibDataset, + BoxlibGrid, + BoxlibHierarchy, + CastroDataset, + MaestroDataset, + NyxDataset, + NyxHierarchy, + OrionDataset, + OrionHierarchy, + WarpXDataset, + WarpXHierarchy, +) +from .fields import ( + BoxlibFieldInfo, + CastroFieldInfo, + MaestroFieldInfo, + NyxFieldInfo, + WarpXFieldInfo, +) +from .io import IOHandlerBoxlib diff --git a/yt/frontends/boxlib/data_structures.py b/yt/frontends/amrex/data_structures.py similarity index 96% rename from yt/frontends/boxlib/data_structures.py rename to yt/frontends/amrex/data_structures.py index 2725275896..ddf0052e81 100644 --- a/yt/frontends/boxlib/data_structures.py +++ b/yt/frontends/amrex/data_structures.py @@ -385,8 +385,10 @@ def _parse_index(self): default_ybounds = (0.0, np.pi) default_zbounds = (0.0, 2 * np.pi) else: + header_file.close() raise RuntimeError("Unknown BoxLib coordinate system.") if int(next(header_file)) != 0: + header_file.close() raise RuntimeError("INTERNAL ERROR! This should be a zero.") # each level is one group with ngrids on it. @@ -453,9 +455,11 @@ def _parse_index(self): go = self.grid(grid_counter + gi, int(offset), filename, self) go.Level = self.grid_levels[grid_counter + gi, :] = level self.grids.append(go) + level_header_file.close() grid_counter += ngrids # already read the filenames above... self.float_type = "float64" + header_file.close() def _cache_endianness(self, test_grid): """ @@ -513,7 +517,7 @@ def _reconstruct_parent_child(self): get_box_grids_level( self.grid_left_edge[i, :], self.grid_right_edge[i, :], - self.grid_levels[i] + 1, + self.grid_levels[i].item() + 1, self.grid_left_edge, self.grid_right_edge, self.grid_levels, @@ -543,6 +547,7 @@ def _count_grids(self): if len(line.split()) != 3: continue self.num_grids += int(line.split()[1]) + header_file.close() def _initialize_grid_arrays(self): super()._initialize_grid_arrays() @@ -647,7 +652,7 @@ def __init__( cparam_filename = cparam_filename or self.__class__._default_cparam_filename self.cparam_filename = self._lookup_cparam_filepath( - output_dir, cparam_filename=cparam_filename + self.output_dir, cparam_filename=cparam_filename ) self.fparam_filename = self._localize_check(fparam_filename) self.storage_filename = storage_filename @@ -698,7 +703,8 @@ def _is_valid(cls, filename, *args, cparam_filename=None, **kwargs): if cparam_filepath is None: return False - lines = [line.lower() for line in open(cparam_filepath).readlines()] + with open(cparam_filepath) as f: + lines = [line.lower() for line in f] return any(cls._subtype_keyword in line for line in lines) @classmethod @@ -734,39 +740,41 @@ def _parse_parameter_file(self): def _parse_cparams(self): if self.cparam_filename is None: return - for line in (line.split("#")[0].strip() for line in open(self.cparam_filename)): - try: - param, vals = (s.strip() for s in line.split("=")) - except ValueError: - continue - # Castro and Maestro mark overridden defaults with a "[*]" before - # the parameter name - param = param.removeprefix("[*]").strip() - if param == "amr.ref_ratio": - vals = self.refine_by = int(vals[0]) - elif param == "Prob.lo_bc": - vals = tuple(p == "1" for p in vals.split()) - assert len(vals) == self.dimensionality - periodicity = [False, False, False] # default to non periodic - periodicity[: self.dimensionality] = vals # fill in ndim parsed values - self._periodicity = tuple(periodicity) - elif param == "castro.use_comoving": - vals = self.cosmological_simulation = int(vals) - else: + 
with open(self.cparam_filename) as param_file: + for line in (line.split("#")[0].strip() for line in param_file): try: - vals = _guess_pcast(vals) - except (IndexError, ValueError): - # hitting an empty string or a comment - vals = None - self.parameters[param] = vals + param, vals = (s.strip() for s in line.split("=")) + except ValueError: + continue + # Castro and Maestro mark overridden defaults with a "[*]" + # before the parameter name + param = param.removeprefix("[*]").strip() + if param == "amr.ref_ratio": + vals = self.refine_by = int(vals[0]) + elif param == "Prob.lo_bc": + vals = tuple(p == "1" for p in vals.split()) + assert len(vals) == self.dimensionality + # default to non periodic + periodicity = [False, False, False] + # fill in ndim parsed values + periodicity[: self.dimensionality] = vals + self._periodicity = tuple(periodicity) + elif param == "castro.use_comoving": + vals = self.cosmological_simulation = int(vals) + else: + try: + vals = _guess_pcast(vals) + except (IndexError, ValueError): + # hitting an empty string or a comment + vals = None + self.parameters[param] = vals if getattr(self, "cosmological_simulation", 0) == 1: self.omega_lambda = self.parameters["comoving_OmL"] self.omega_matter = self.parameters["comoving_OmM"] self.hubble_constant = self.parameters["comoving_h"] - a_file = open(os.path.join(self.output_dir, "comoving_a")) - line = a_file.readline().strip() - a_file.close() + with open(os.path.join(self.output_dir, "comoving_a")) as a_file: + line = a_file.readline().strip() self.current_redshift = 1 / float(line) - 1 else: self.current_redshift = 0.0 @@ -783,7 +791,8 @@ def _parse_fparams(self): """ if self.fparam_filename is None: return - for line in (l for l in open(self.fparam_filename) if "=" in l): + param_file = open(self.fparam_filename) + for line in (l for l in param_file if "=" in l): param, vals = (v.strip() for v in line.split("=")) # Now, there are a couple different types of parameters. # Some will be where you only have floating point values, others @@ -798,6 +807,7 @@ def _parse_fparams(self): if len(vals) == 1: vals = vals[0] self.parameters[param] = vals + param_file.close() def _parse_header_file(self): """ @@ -823,7 +833,7 @@ def _parse_header_file(self): # in a slightly hidden variable. self._max_level = int(header_file.readline()) - for side, init in zip(["left", "right"], [np.zeros, np.ones]): + for side, init in [("left", np.zeros), ("right", np.ones)]: domain_edge = init(3, dtype="float64") domain_edge[: self.dimensionality] = header_file.readline().split() setattr(self, f"domain_{side}_edge", domain_edge) @@ -845,6 +855,7 @@ def _parse_header_file(self): float(rf) / self.refine_by == int(float(rf) / self.refine_by) for rf in ref_factors ): + header_file.close() raise RuntimeError base_log = np.log2(self.refine_by) self.level_offsets = [0] # level 0 has to have 0 offset @@ -888,6 +899,7 @@ def _parse_header_file(self): try: geom_str = known_types[coordinate_type] except KeyError as err: + header_file.close() raise ValueError(f"Unknown BoxLib coord_type `{coordinate_type}`.") from err else: self.geometry = Geometry(geom_str) @@ -897,6 +909,8 @@ def _parse_header_file(self): dre[2] = 2.0 * np.pi self.domain_right_edge = dre + header_file.close() + def _set_code_unit_attributes(self): setdefaultattr(self, "length_unit", self.quan(1.0, "cm")) setdefaultattr(self, "mass_unit", self.quan(1.0, "g")) @@ -1289,9 +1303,8 @@ def _parse_parameter_file(self): # Read in the `comoving_a` file and parse the value. 
We should fix this # in the new Nyx output format... - a_file = open(os.path.join(self.output_dir, "comoving_a")) - a_string = a_file.readline().strip() - a_file.close() + with open(os.path.join(self.output_dir, "comoving_a")) as a_file: + a_string = a_file.readline().strip() # Set the scale factor and redshift self.cosmological_scale_factor = float(a_string) @@ -1337,7 +1350,7 @@ def _guess_pcast(vals): pcast = float else: pcast = int - if pcast == bool: + if pcast is bool: vals = [value == "T" for value in vals.split()] else: vals = [pcast(value) for value in vals.split()] diff --git a/yt/frontends/boxlib/misc.py b/yt/frontends/amrex/definitions.py similarity index 100% rename from yt/frontends/boxlib/misc.py rename to yt/frontends/amrex/definitions.py diff --git a/yt/frontends/boxlib/fields.py b/yt/frontends/amrex/fields.py similarity index 84% rename from yt/frontends/boxlib/fields.py rename to yt/frontends/amrex/fields.py index 5e32ad83e7..3901cf180c 100644 --- a/yt/frontends/boxlib/fields.py +++ b/yt/frontends/amrex/fields.py @@ -1,5 +1,5 @@ import re -import string +import sys import numpy as np @@ -8,12 +8,15 @@ from yt.units import YTQuantity from yt.utilities.physical_constants import amu_cgs, boltzmann_constant_cgs, c +if sys.version_info >= (3, 10): + from typing import TypeAlias +else: + from typing_extensions import TypeAlias + rho_units = "code_mass / code_length**3" mom_units = "code_mass / (code_time * code_length**2)" eden_units = "code_mass / (code_time**2 * code_length)" # erg / cm^3 -spec_finder = re.compile(r".*\((\D*)(\d*)\).*") - def _thermal_energy_density(field, data): # What we've got here is UEINT: @@ -325,7 +328,9 @@ class CastroFieldInfo(FieldInfoContainer): ("erg/cm**3", ["kinetic_energy_density"], r"\frac{1}{2}\rho|\mathbf{U}|^2"), ), ("soundspeed", ("cm/s", ["sound_speed"], "Sound Speed")), - ("Machnumber", ("", ["mach_number"], "Mach Number")), + ("MachNumber", ("", ["mach_number"], "Mach Number")), + ("abar", ("", [], r"$\bar{A}$")), + ("Ye", ("", [], r"$Y_e$")), ("entropy", ("erg/(g*K)", ["entropy"], r"s")), ("magvort", ("1/s", ["vorticity_magnitude"], r"|\nabla \times \mathbf{U}|")), ("divu", ("1/s", ["velocity_divergence"], r"\nabla \cdot \mathbf{U}")), @@ -358,25 +363,23 @@ def setup_fluid_fields(self): for _, field in self.ds.field_list: if field.startswith("X("): # We have a fraction - nice_name, tex_label = _nice_species_name(field) - self.alias( - ("gas", f"{nice_name}_fraction"), ("boxlib", field), units="" + sub = Substance(field) + # Overwrite field to use nicer tex_label display_name + self.add_output_field( + ("boxlib", field), + sampling_type="cell", + units="", + display_name=rf"X\left({sub.to_tex()}\right)", ) - func = _create_density_func(("gas", f"{nice_name}_fraction")) + self.alias(("gas", f"{sub}_fraction"), ("boxlib", field), units="") + func = _create_density_func(("gas", f"{sub}_fraction")) self.add_field( - name=("gas", f"{nice_name}_density"), + name=("gas", f"{sub}_density"), sampling_type="cell", function=func, units=self.ds.unit_system["density"], + display_name=rf"\rho {sub.to_tex()}", ) - # We know this will either have one letter, or two. 
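The Substance helper that replaces this removed string-slicing logic is defined just below; a sketch of its expected behavior, mirroring cases from the new test_field_parsing.py later in this diff:

from yt.frontends.amrex.fields import Substance

iso = Substance("X(C12)")  # one element with a count -> isotope
assert iso.is_isotope() and iso.to_tex() == "^{12}C"

mol = Substance("X(H2O)")  # several elements -> molecule
assert mol.is_molecule() and str(mol) == "H2O"

ash = Substance("X(ash)")  # bare word, no count -> descriptive name
assert ash.is_descriptive_name() and ash.to_tex() == "ash"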
- if field[3] in string.ascii_letters: - element, weight = field[2:4], field[4:-1] - else: - element, weight = field[2:3], field[3:-1] # NOQA - - # Here we can, later, add number density - # right now element and weight inferred above are unused class MaestroFieldInfo(FieldInfoContainer): @@ -471,43 +474,27 @@ def setup_fluid_fields(self): for _, field in self.ds.field_list: if field.startswith("X("): # We have a mass fraction - nice_name, tex_label = _nice_species_name(field) + sub = Substance(field) # Overwrite field to use nicer tex_label display_name self.add_output_field( ("boxlib", field), sampling_type="cell", units="", - display_name=tex_label, - ) - self.alias( - ("gas", f"{nice_name}_fraction"), ("boxlib", field), units="" + display_name=rf"X\left({sub.to_tex()}\right)", ) - func = _create_density_func(("gas", f"{nice_name}_fraction")) + self.alias(("gas", f"{sub}_fraction"), ("boxlib", field), units="") + func = _create_density_func(("gas", f"{sub}_fraction")) self.add_field( - name=("gas", f"{nice_name}_density"), + name=("gas", f"{sub}_density"), sampling_type="cell", function=func, units=unit_system["density"], - display_name=r"\rho %s" % tex_label, + display_name=rf"\rho {sub.to_tex()}", ) - # Most of the time our species will be of the form - # element name + atomic weight (e.g. C12), but - # sometimes we make up descriptive names (e.g. ash) - if any(char.isdigit() for char in field): - # We know this will either have one letter, or two. - if field[3] in string.ascii_letters: - element, weight = field[2:4], field[4:-1] - else: - element, weight = field[2:3], field[3:-1] # NOQA - weight = int(weight) - - # Here we can, later, add number density using 'element' and - # 'weight' inferred above - elif field.startswith("omegadot("): - nice_name, tex_label = _nice_species_name(field) - display_name = r"\dot{\omega}\left[%s\right]" % tex_label + sub = Substance(field) + display_name = rf"\dot{{\omega}}\left[{sub.to_tex()}\right]" # Overwrite field to use nicer tex_label'ed display_name self.add_output_field( ("boxlib", field), @@ -516,23 +503,70 @@ def setup_fluid_fields(self): display_name=display_name, ) self.alias( - ("gas", f"{nice_name}_creation_rate"), + ("gas", f"{sub}_creation_rate"), ("boxlib", field), units=unit_system["frequency"], ) -def _nice_species_name(field): - spec_match = spec_finder.search(field) - nice_name = "".join(spec_match.groups()) - # if the species field is a descriptive name, then the match - # on the integer will be blank - # modify the tex string in this case to remove spurious tex spacing - lab = r"X\left(^{%s}%s\right)" - if spec_match.groups()[-1] == "": - lab = r"X\left(%s%s\right)" - tex_label = lab % spec_match.groups()[::-1] - return nice_name, tex_label +substance_expr_re = re.compile(r"\(([a-zA-Z][a-zA-Z0-9]*)\)") +substance_elements_re = re.compile(r"(?P<name>[a-zA-Z]+)(?P<count>\d*)") +SubstanceSpec: TypeAlias = list[tuple[str, int]] + + +class Substance: + def __init__(self, data: str) -> None: + if (m := substance_expr_re.search(data)) is None: + raise ValueError(f"{data!r} doesn't match expected regular expression") + sub_str = m.group() + constituents = substance_elements_re.findall(sub_str) + + # 0 is used as a sentinel value to mark descriptive names + default_value = 1 if len(constituents) > 1 else 0 + self._spec: SubstanceSpec = [ + (name, int(count or default_value)) for (name, count) in constituents + ] + + def get_spec(self) -> SubstanceSpec: + return self._spec.copy() + + def is_isotope(self) -> bool: + return len(self._spec) == 1 and
self._spec[0][1] > 0 + + def is_molecule(self) -> bool: + return len(self._spec) != 1 + + def is_descriptive_name(self) -> bool: + return len(self._spec) == 1 and self._spec[0][1] == 0 + + def __str__(self) -> str: + return "".join( + f"{element}{count if count > 1 else ''}" for element, count in self._spec + ) + + def _to_tex_isotope(self) -> str: + element, count = self._spec[0] + return rf"^{{{count}}}{element}" + + def _to_tex_molecule(self) -> str: + return "".join( + rf"{element}_{{{count if count>1 else ''}}}" + for element, count in self._spec + ) + + def _to_tex_descriptive(self) -> str: + return str(self) + + def to_tex(self) -> str: + if self.is_isotope(): + return self._to_tex_isotope() + elif self.is_molecule(): + return self._to_tex_molecule() + elif self.is_descriptive_name(): + return self._to_tex_descriptive() + else: + # should only be reachable in case of a regular expression defect + raise RuntimeError def _create_density_func(field_name): diff --git a/yt/frontends/boxlib/io.py b/yt/frontends/amrex/io.py similarity index 100% rename from yt/frontends/boxlib/io.py rename to yt/frontends/amrex/io.py diff --git a/yt/frontends/amrex/misc.py b/yt/frontends/amrex/misc.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/yt/frontends/amrex/tests/__init__.py b/yt/frontends/amrex/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/yt/frontends/amrex/tests/test_field_parsing.py b/yt/frontends/amrex/tests/test_field_parsing.py new file mode 100644 index 0000000000..ec3fbe779f --- /dev/null +++ b/yt/frontends/amrex/tests/test_field_parsing.py @@ -0,0 +1,51 @@ +import pytest + +from yt.frontends.amrex.fields import Substance + + +@pytest.mark.parametrize( + "data, expected", + [ + pytest.param("X(He5)", [("He", 5)], id="isotope_1"), + pytest.param("X(C12)", [("C", 12)], id="isotope_2"), + pytest.param("X(A1B2C3)", [("A", 1), ("B", 2), ("C", 3)], id="molecule_1"), + pytest.param("X(C12H24)", [("C", 12), ("H", 24)], id="molecule_2"), + pytest.param("X(H2O)", [("H", 2), ("O", 1)], id="molecule_3"), + pytest.param("X(ash)", [("ash", 0)], id="descriptive_name"), + ], +) +def test_Substance_spec(data, expected): + assert Substance(data)._spec == expected + + +@pytest.mark.parametrize( + "data, expected_type", + [ + pytest.param("X(He5)", "isotope", id="isotope_1"), + pytest.param("X(C12)", "isotope", id="isotope_2"), + pytest.param("X(A1B2C3)", "molecule", id="molecule_1"), + pytest.param("X(C12H24)", "molecule", id="molecule_2"), + pytest.param("X(H2O)", "molecule", id="molecule_3"), + pytest.param("X(ash)", "descriptive_name", id="descriptive_name"), + ], +) +def test_Substance_type(data, expected_type): + sub = Substance(data) + assert getattr(sub, f"is_{expected_type}")() + + +@pytest.mark.parametrize( + "data, expected_str, expected_tex", + [ + pytest.param("X(He5)", "He5", "^{5}He", id="isotope_1"), + pytest.param("X(C12)", "C12", "^{12}C", id="isotope_2"), + pytest.param("X(A1B2C3)", "AB2C3", "A_{}B_{2}C_{3}", id="molecule_1"), + pytest.param("X(C12H24)", "C12H24", "C_{12}H_{24}", id="molecule_2"), + pytest.param("X(H2O)", "H2O", "H_{2}O_{}", id="molecule_3"), + pytest.param("X(ash)", "ash", "ash", id="descriptive_name"), + ], +) +def test_Substance_to_str(data, expected_str, expected_tex): + sub = Substance(data) + assert str(sub) == expected_str + assert sub.to_tex() == expected_tex diff --git a/yt/frontends/amrex/tests/test_outputs.py b/yt/frontends/amrex/tests/test_outputs.py new file mode 100644 index 0000000000..bfde30d0fb ---
/dev/null +++ b/yt/frontends/amrex/tests/test_outputs.py @@ -0,0 +1,445 @@ +import numpy as np +from numpy.testing import assert_allclose, assert_equal + +from yt.frontends.amrex.api import ( + AMReXDataset, + CastroDataset, + MaestroDataset, + NyxDataset, + OrionDataset, + WarpXDataset, +) +from yt.loaders import load +from yt.testing import ( + disable_dataset_cache, + requires_file, + units_override_check, +) +from yt.utilities.answer_testing.framework import ( + GridValuesTest, + data_dir_load, + requires_ds, + small_patch_amr, +) + +# We don't do anything needing ghost zone generation right now, because these +# are non-periodic datasets. +_orion_fields = ( + ("gas", "temperature"), + ("gas", "density"), + ("gas", "velocity_magnitude"), +) +_nyx_fields = ( + ("boxlib", "Ne"), + ("boxlib", "Temp"), + ("boxlib", "particle_mass_density"), +) +_warpx_fields = (("mesh", "Ex"), ("mesh", "By"), ("mesh", "jz")) +_castro_fields = ( + ("boxlib", "Temp"), + ("gas", "density"), + ("boxlib", "particle_count"), +) + +radadvect = "RadAdvect/plt00000" + + +@requires_ds(radadvect) +def test_radadvect(): + ds = data_dir_load(radadvect) + assert_equal(str(ds), "plt00000") + for test in small_patch_amr(ds, _orion_fields): + test_radadvect.__name__ = test.description + yield test + + +rt = "RadTube/plt00500" + + +@requires_ds(rt) +def test_radtube(): + ds = data_dir_load(rt) + assert_equal(str(ds), "plt00500") + for test in small_patch_amr(ds, _orion_fields): + test_radtube.__name__ = test.description + yield test + + +star = "StarParticles/plrd01000" + + +@requires_ds(star) +def test_star(): + ds = data_dir_load(star) + assert_equal(str(ds), "plrd01000") + for test in small_patch_amr(ds, _orion_fields): + test_star.__name__ = test.description + yield test + + +LyA = "Nyx_LyA/plt00000" + + +@requires_ds(LyA) +def test_LyA(): + ds = data_dir_load(LyA) + assert_equal(str(ds), "plt00000") + for test in small_patch_amr( + ds, _nyx_fields, input_center="c", input_weight=("boxlib", "Ne") + ): + test_LyA.__name__ = test.description + yield test + + +@requires_file(LyA) +def test_nyx_particle_io(): + ds = data_dir_load(LyA) + + grid = ds.index.grids[0] + npart_grid_0 = 7908 # read directly from the header + assert_equal(grid[("all", "particle_position_x")].size, npart_grid_0) + assert_equal(grid["DM", "particle_position_y"].size, npart_grid_0) + assert_equal(grid["all", "particle_position_z"].size, npart_grid_0) + + ad = ds.all_data() + npart = 32768 # read directly from the header + assert_equal(ad[("all", "particle_velocity_x")].size, npart) + assert_equal(ad["DM", "particle_velocity_y"].size, npart) + assert_equal(ad["all", "particle_velocity_z"].size, npart) + + assert np.all(ad[("all", "particle_mass")] == ad[("all", "particle_mass")][0]) + + left_edge = ds.arr([0.0, 0.0, 0.0], "code_length") + right_edge = ds.arr([4.0, 4.0, 4.0], "code_length") + center = 0.5 * (left_edge + right_edge) + + reg = ds.region(center, left_edge, right_edge) + + assert np.all( + np.logical_and( + reg[("all", "particle_position_x")] <= right_edge[0], + reg[("all", "particle_position_x")] >= left_edge[0], + ) + ) + + assert np.all( + np.logical_and( + reg[("all", "particle_position_y")] <= right_edge[1], + reg[("all", "particle_position_y")] >= left_edge[1], + ) + ) + + assert np.all( + np.logical_and( + reg[("all", "particle_position_z")] <= right_edge[2], + reg[("all", "particle_position_z")] >= left_edge[2], + ) + ) + + +RT_particles = "RT_particles/plt00050" + + +@requires_ds(RT_particles) +def test_RT_particles(): + ds = 
data_dir_load(RT_particles) + assert_equal(str(ds), "plt00050") + for test in small_patch_amr(ds, _castro_fields): + test_RT_particles.__name__ = test.description + yield test + + +@requires_file(RT_particles) +def test_castro_particle_io(): + ds = data_dir_load(RT_particles) + + grid = ds.index.grids[2] + npart_grid_2 = 49 # read directly from the header + assert_equal(grid[("all", "particle_position_x")].size, npart_grid_2) + assert_equal(grid["Tracer", "particle_position_y"].size, npart_grid_2) + assert_equal(grid["all", "particle_position_y"].size, npart_grid_2) + + ad = ds.all_data() + npart = 49 # read directly from the header + assert_equal(ad[("all", "particle_velocity_x")].size, npart) + assert_equal(ad["Tracer", "particle_velocity_y"].size, npart) + assert_equal(ad["all", "particle_velocity_y"].size, npart) + + left_edge = ds.arr([0.0, 0.0, 0.0], "code_length") + right_edge = ds.arr([0.25, 1.0, 1.0], "code_length") + center = 0.5 * (left_edge + right_edge) + + reg = ds.region(center, left_edge, right_edge) + + assert np.all( + np.logical_and( + reg[("all", "particle_position_x")] <= right_edge[0], + reg[("all", "particle_position_x")] >= left_edge[0], + ) + ) + + assert np.all( + np.logical_and( + reg[("all", "particle_position_y")] <= right_edge[1], + reg[("all", "particle_position_y")] >= left_edge[1], + ) + ) + + +langmuir = "LangmuirWave/plt00020_v2" + + +@requires_ds(langmuir) +def test_langmuir(): + ds = data_dir_load(langmuir) + assert_equal(str(ds), "plt00020_v2") + for test in small_patch_amr( + ds, _warpx_fields, input_center="c", input_weight=("mesh", "Ex") + ): + test_langmuir.__name__ = test.description + yield test + + +plasma = "PlasmaAcceleration/plt00030_v2" + + +@requires_ds(plasma) +def test_plasma(): + ds = data_dir_load(plasma) + assert_equal(str(ds), "plt00030_v2") + for test in small_patch_amr( + ds, _warpx_fields, input_center="c", input_weight=("mesh", "Ex") + ): + test_plasma.__name__ = test.description + yield test + + +beam = "GaussianBeam/plt03008" + + +@requires_ds(beam) +def test_beam(): + ds = data_dir_load(beam) + assert_equal(str(ds), "plt03008") + for param in ("number of boxes", "maximum zones"): + # PR 2807 + # these parameters are only populated if the config file attached to this + # dataset is read correctly + assert param in ds.parameters + for test in small_patch_amr( + ds, _warpx_fields, input_center="c", input_weight=("mesh", "Ex") + ): + test_beam.__name__ = test.description + yield test + + +@requires_file(plasma) +def test_warpx_particle_io(): + ds = data_dir_load(plasma) + grid = ds.index.grids[0] + + # read directly from the header + npart0_grid_0 = 344 + npart1_grid_0 = 69632 + + assert_equal(grid["particle0", "particle_position_x"].size, npart0_grid_0) + assert_equal(grid["particle1", "particle_position_y"].size, npart1_grid_0) + assert_equal(grid["all", "particle_position_z"].size, npart0_grid_0 + npart1_grid_0) + + # read directly from the header + npart0 = 1360 + npart1 = 802816 + ad = ds.all_data() + assert_equal(ad["particle0", "particle_velocity_x"].size, npart0) + assert_equal(ad["particle1", "particle_velocity_y"].size, npart1) + assert_equal(ad["all", "particle_velocity_z"].size, npart0 + npart1) + + assert np.all(ad["particle1", "particle_mass"] == ad["particle1", "particle_mass"][0]) + assert np.all(ad["particle0", "particle_mass"] == ad["particle0", "particle_mass"][0]) + + left_edge = ds.arr([-7.5e-5, -7.5e-5, -7.5e-5], "code_length") + right_edge = ds.arr([2.5e-5, 2.5e-5, 2.5e-5], "code_length") + center = 0.5 * (left_edge +
right_edge) + + reg = ds.region(center, left_edge, right_edge) + + assert np.all( + np.logical_and( + reg[("all", "particle_position_x")] <= right_edge[0], + reg[("all", "particle_position_x")] >= left_edge[0], + ) + ) + + assert np.all( + np.logical_and( + reg[("all", "particle_position_y")] <= right_edge[1], + reg[("all", "particle_position_y")] >= left_edge[1], + ) + ) + + assert np.all( + np.logical_and( + reg[("all", "particle_position_z")] <= right_edge[2], + reg[("all", "particle_position_z")] >= left_edge[2], + ) + ) + + +_raw_fields = [("raw", "Bx"), ("raw", "Ey"), ("raw", "jz")] + +laser = "Laser/plt00015" + + +@requires_ds(laser) +def test_raw_fields(): + for field in _raw_fields: + yield GridValuesTest(laser, field) + + +@requires_file(rt) +def test_OrionDataset(): + assert isinstance(data_dir_load(rt), OrionDataset) + + +@requires_file(LyA) +def test_NyxDataset(): + assert isinstance(data_dir_load(LyA), NyxDataset) + + +@requires_file("nyx_small/nyx_small_00000") +def test_NyxDataset_2(): + assert isinstance(data_dir_load("nyx_small/nyx_small_00000"), NyxDataset) + + +@requires_file(RT_particles) +def test_CastroDataset(): + assert isinstance(data_dir_load(RT_particles), CastroDataset) + + +@requires_file("castro_sod_x_plt00036") +def test_CastroDataset_2(): + assert isinstance(data_dir_load("castro_sod_x_plt00036"), CastroDataset) + + +@requires_file("castro_sedov_1d_cyl_plt00150") +def test_CastroDataset_3(): + assert isinstance(data_dir_load("castro_sedov_1d_cyl_plt00150"), CastroDataset) + + +@requires_file(plasma) +def test_WarpXDataset(): + assert isinstance(data_dir_load(plasma), WarpXDataset) + + +@disable_dataset_cache +@requires_file(plasma) +def test_magnetic_units(): + ds1 = load(plasma) + assert_allclose(ds1.magnetic_unit.value, 1.0) + assert str(ds1.magnetic_unit.units) == "T" + mag_unit1 = ds1.magnetic_unit.to("code_magnetic") + assert_allclose(mag_unit1.value, 1.0) + assert str(mag_unit1.units) == "code_magnetic" + ds2 = load(plasma, unit_system="cgs") + assert_allclose(ds2.magnetic_unit.value, 1.0e4) + assert str(ds2.magnetic_unit.units) == "G" + mag_unit2 = ds2.magnetic_unit.to("code_magnetic") + assert_allclose(mag_unit2.value, 1.0) + assert str(mag_unit2.units) == "code_magnetic" + + +@requires_ds(laser) +def test_WarpXDataset_2(): + assert isinstance(data_dir_load(laser), WarpXDataset) + + +@requires_file("plt.Cavity00010") +def test_AMReXDataset(): + ds = data_dir_load("plt.Cavity00010", kwargs={"cparam_filename": "inputs"}) + assert isinstance(ds, AMReXDataset) + + +@requires_file(rt) +def test_units_override(): + units_override_check(rt) + + +nyx_no_particles = "nyx_sedov_plt00086" + + +@requires_file(nyx_no_particles) +def test_nyx_no_part(): + assert isinstance(data_dir_load(nyx_no_particles), NyxDataset) + + fields = sorted( + [ + ("boxlib", "H"), + ("boxlib", "He"), + ("boxlib", "MachNumber"), + ("boxlib", "Ne"), + ("boxlib", "Rank"), + ("boxlib", "StateErr"), + ("boxlib", "Temp"), + ("boxlib", "X(H)"), + ("boxlib", "X(He)"), + ("boxlib", "density"), + ("boxlib", "divu"), + ("boxlib", "eint_E"), + ("boxlib", "eint_e"), + ("boxlib", "entropy"), + ("boxlib", "forcex"), + ("boxlib", "forcey"), + ("boxlib", "forcez"), + ("boxlib", "kineng"), + ("boxlib", "logden"), + ("boxlib", "magmom"), + ("boxlib", "magvel"), + ("boxlib", "magvort"), + ("boxlib", "pressure"), + ("boxlib", "rho_E"), + ("boxlib", "rho_H"), + ("boxlib", "rho_He"), + ("boxlib", "rho_e"), + ("boxlib", "soundspeed"), + ("boxlib", "x_velocity"), + ("boxlib", "xmom"), + ("boxlib", 
"y_velocity"), + ("boxlib", "ymom"), + ("boxlib", "z_velocity"), + ("boxlib", "zmom"), + ] + ) + + ds = data_dir_load(nyx_no_particles) + assert_equal(sorted(ds.field_list), fields) + + +msubch = "maestro_subCh_plt00248" + + +@requires_file(msubch) +def test_maestro_parameters(): + assert isinstance(data_dir_load(msubch), MaestroDataset) + ds = data_dir_load(msubch) + + # Check a string parameter + assert ds.parameters["plot_base_name"] == "subCh_hot_baserun_plt" + assert type(ds.parameters["plot_base_name"]) is str # noqa: E721 + + # Check boolean parameters: T or F + assert not ds.parameters["use_thermal_diffusion"] + assert type(ds.parameters["use_thermal_diffusion"]) is bool # noqa: E721 + + assert ds.parameters["do_burning"] + assert type(ds.parameters["do_burning"]) is bool # noqa: E721 + + # Check a float parameter with a decimal point + assert ds.parameters["sponge_kappa"] == float("10.00000000") + assert type(ds.parameters["sponge_kappa"]) is float # noqa: E721 + + # Check a float parameter with E exponent notation + assert ds.parameters["small_dt"] == float("0.1000000000E-09") + + # Check an int parameter + assert ds.parameters["s0_interp_type"] == 3 + assert type(ds.parameters["s0_interp_type"]) is int # noqa: E721 diff --git a/yt/frontends/amrvac/data_structures.py b/yt/frontends/amrvac/data_structures.py index 227555b62f..5b8d22ba54 100644 --- a/yt/frontends/amrvac/data_structures.py +++ b/yt/frontends/amrvac/data_structures.py @@ -7,6 +7,7 @@ import os import struct +import sys import warnings import weakref from pathlib import Path @@ -26,6 +27,9 @@ from .fields import AMRVACFieldInfo from .io import read_amrvac_namelist +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + def _parse_geometry(geometry_tag: str) -> Geometry: """Translate AMRVAC's geometry tag to yt's format. @@ -142,7 +146,9 @@ def _parse_index(self): dim = self.dataset.dimensionality self.grids = np.empty(self.num_grids, dtype="object") - for igrid, (ytlevel, morton_index) in enumerate(zip(ytlevels, morton_indices)): + for igrid, (ytlevel, morton_index) in enumerate( + zip(ytlevels, morton_indices, strict=True) + ): dx = dx0 / self.dataset.refine_by**ytlevel left_edge = xmin + (morton_index - 1) * block_nx * dx @@ -203,20 +209,8 @@ def __init__( # note: geometry_override and parfiles are specific to this frontend self._geometry_override = geometry_override - super().__init__( - filename, - dataset_type, - units_override=units_override, - unit_system=unit_system, - default_species_fields=default_species_fields, - ) - - self._parfiles = parfiles + self._parfiles = [] - namelist = None - namelist_gamma = None - c_adiab = None - e_is_internal = None if parfiles is not None: parfiles = list(always_iterable(parfiles)) ppf = Path(parfiles[0]) @@ -228,7 +222,22 @@ def __init__( filename, ) parfiles = [Path(ytcfg["yt", "test_data_dir"]) / pf for pf in parfiles] + self._parfiles = parfiles + super().__init__( + filename, + dataset_type, + units_override=units_override, + unit_system=unit_system, + default_species_fields=default_species_fields, + ) + + namelist = None + namelist_gamma = None + c_adiab = None + e_is_internal = None + + if parfiles is not None: namelist = read_amrvac_namelist(parfiles) if "hd_list" in namelist: c_adiab = namelist["hd_list"].get("hd_adiab", 1.0) @@ -373,10 +382,11 @@ def _set_code_unit_attributes(self): # note: yt sets hydrogen mass equal to proton mass, amrvac doesn't. 
mp_cgs = self.quan(1.672621898e-24, "g") # This value is taken from AstroPy - He_abundance = 0.1 # hardcoded parameter in AMRVAC # get self.length_unit if overrides are supplied, otherwise use default length_unit = getattr(self, "length_unit", self.quan(1, "cm")) + namelist = read_amrvac_namelist(self._parfiles) + He_abundance = namelist.get("mhd_list", {}).get("he_abundance", 0.1) # 1. calculations for mass, density, numberdensity if "mass_unit" in self.units_override: diff --git a/yt/frontends/amrvac/datfile_utils.py b/yt/frontends/amrvac/datfile_utils.py index 379be74f93..21436fd2d5 100644 --- a/yt/frontends/amrvac/datfile_utils.py +++ b/yt/frontends/amrvac/datfile_utils.py @@ -1,7 +1,11 @@ import struct +import sys import numpy as np +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + # Size of basic types (in bytes) SIZE_LOGICAL = 4 SIZE_INT = 4 @@ -98,7 +102,7 @@ def get_header(istream): ] # Store the values corresponding to the names - for val, name in zip(vals, names): + for val, name in zip(vals, names, strict=True): h[name] = val return h diff --git a/yt/frontends/amrvac/io.py b/yt/frontends/amrvac/io.py index 1be3111a34..a65bdfe6b3 100644 --- a/yt/frontends/amrvac/io.py +++ b/yt/frontends/amrvac/io.py @@ -42,6 +42,9 @@ def read_amrvac_namelist(parfiles): for nml in namelists: unified_namelist.patch(nml) + if "filelist" not in unified_namelist: + return unified_namelist + # accumulate `&filelist:base_filename` base_filename = "".join( nml.get("filelist", {}).get("base_filename", "") for nml in namelists diff --git a/yt/frontends/art/data_structures.py b/yt/frontends/art/data_structures.py index 0bfe58bda9..d13a2f77e7 100644 --- a/yt/frontends/art/data_structures.py +++ b/yt/frontends/art/data_structures.py @@ -1,6 +1,7 @@ import glob import os import struct +import sys import weakref import numpy as np @@ -33,6 +34,9 @@ from yt.geometry.oct_geometry_handler import OctreeIndex from yt.geometry.particle_geometry_handler import ParticleIndex +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + class ARTIndex(OctreeIndex): def __init__(self, ds, dataset_type="art"): @@ -328,7 +332,9 @@ def _parse_parameter_file(self): mylog.info("Discovered %i species of particles", len(ls_nonzero)) info_str = "Particle populations: " + "%9i " * len(ls_nonzero) mylog.info(info_str, *ls_nonzero) - self._particle_type_counts = dict(zip(self.particle_types_raw, ls_nonzero)) + self._particle_type_counts = dict( + zip(self.particle_types_raw, ls_nonzero, strict=True) + ) for k, v in particle_header_vals.items(): if k in self.parameters.keys(): if not self.parameters[k] == v: @@ -396,7 +402,9 @@ def __init__(self, ds, io, filename, file_id): super().__init__(ds, io, filename, file_id, range=None) self.total_particles = {} for ptype, count in zip( - ds.particle_types_raw, ds.parameters["total_particles"] + ds.particle_types_raw, + ds.parameters["total_particles"], + strict=True, ): self.total_particles[ptype] = count with open(filename, "rb") as f: @@ -754,7 +762,7 @@ def fill(self, content, ftfields, selector): content, self.domain.level_child_offsets, self.domain.level_count ) ns = (self.domain.ds.domain_dimensions.prod() // 8, 8) - for field, fi in zip(fields, field_idxs): + for field, fi in zip(fields, field_idxs, strict=True): source[field] = np.empty(ns, dtype="float64", order="C") dt = data[fi, :].reshape(self.domain.ds.domain_dimensions, order="F") for i in range(2): diff --git a/yt/frontends/art/io.py b/yt/frontends/art/io.py index 
137cd096b0..13813677ab 100644 --- a/yt/frontends/art/io.py +++ b/yt/frontends/art/io.py @@ -1,5 +1,6 @@ import os import os.path +import sys from collections import defaultdict from functools import partial @@ -16,6 +17,9 @@ from yt.utilities.io_handler import BaseIOHandler from yt.utilities.logger import ytLogger as mylog +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + class IOHandlerART(BaseIOHandler): _dataset_type = "art" @@ -125,7 +129,7 @@ def _get_field(self, field): if fname.startswith("particle_mass"): a = 0 data = np.zeros(npa, dtype="f8") - for ptb, size, m in zip(pbool, sizes, self.ws): + for ptb, size, m in zip(pbool, sizes, self.ws, strict=True): if ptb: data[a : a + size] = m a += size @@ -135,7 +139,7 @@ def _get_field(self, field): elif fname == "particle_type": a = 0 data = np.zeros(npa, dtype="int64") - for i, (ptb, size) in enumerate(zip(pbool, sizes)): + for i, (ptb, size) in enumerate(zip(pbool, sizes, strict=True)): if ptb: data[a : a + size] = i a += size @@ -218,7 +222,7 @@ def _get_field(self, field): if fname.startswith("particle_mass"): a = 0 data = np.zeros(npa, dtype="f8") - for ptb, size, m in zip(pbool, sizes, self.ws): + for ptb, size, m in zip(pbool, sizes, self.ws, strict=True): if ptb: data[a : a + size] = m a += size @@ -228,7 +232,7 @@ def _get_field(self, field): elif fname == "particle_type": a = 0 data = np.zeros(npa, dtype="int64") - for i, (ptb, size) in enumerate(zip(pbool, sizes)): + for i, (ptb, size) in enumerate(zip(pbool, sizes, strict=True)): if ptb: data[a : a + size] = i a += size @@ -635,7 +639,7 @@ def b2t(tb, n=1e2, logger=None, **kwargs): return a2t(b2a(tb)) if len(tb) < n: n = len(tb) - tbs = -1.0 * np.logspace(np.log10(-tb.min()), np.log10(-tb.max()), n) + tbs = -1.0 * np.logspace(np.log10(-tb.min()), np.log10(-tb.max()), int(n)) ages = [] for i, tbi in enumerate(tbs): ages += (a2t(b2a(tbi)),) diff --git a/yt/frontends/artio/data_structures.py b/yt/frontends/artio/data_structures.py index 878b86c1ef..e9acba4e68 100644 --- a/yt/frontends/artio/data_structures.py +++ b/yt/frontends/artio/data_structures.py @@ -1,4 +1,5 @@ import os +import sys import weakref from collections import defaultdict from typing import Optional @@ -21,6 +22,9 @@ from yt.geometry.geometry_handler import Index, YTDataChunk from yt.utilities.exceptions import YTParticleDepositionNotImplemented +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + class ARTIOOctreeSubset(OctreeSubset): _domain_offset = 0 @@ -72,7 +76,7 @@ def fill(self, fields, selector): self.oct_handler.fill_sfc( levels, cell_inds, file_inds, domain_counts, field_indices, tr ) - tr = dict(zip(fields, tr)) + tr = dict(zip(fields, tr, strict=True)) return tr def fill_particles(self, fields): @@ -118,7 +122,7 @@ def fill(self, fields, selector): ] tr = self.oct_handler.fill_sfc(selector, field_indices) self.data_size = tr[0].size - tr = dict(zip(fields, tr)) + tr = dict(zip(fields, tr, strict=True)) return tr def deposit(self, positions, fields=None, method=None, kernel_name="cubic"): diff --git a/yt/frontends/artio/definitions.py b/yt/frontends/artio/definitions.py index e4b39266be..b054c5d8d3 100644 --- a/yt/frontends/artio/definitions.py +++ b/yt/frontends/artio/definitions.py @@ -1,3 +1,9 @@ +import sys + +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + + yt_to_art = { "Density": "HVAR_GAS_DENSITY", "TotalEnergy": "HVAR_GAS_ENERGY", @@ -27,7 +33,7 @@ "stars": "STAR", "nbody": "N-BODY", } -art_to_yt = 
dict(zip(yt_to_art.values(), yt_to_art.keys())) +art_to_yt = dict(zip(yt_to_art.values(), yt_to_art.keys(), strict=True)) class ARTIOconstants: diff --git a/yt/frontends/athena/data_structures.py b/yt/frontends/athena/data_structures.py index 3378ec1b11..57e8058a6b 100644 --- a/yt/frontends/athena/data_structures.py +++ b/yt/frontends/athena/data_structures.py @@ -1,5 +1,6 @@ import os import re +import sys import weakref import numpy as np @@ -17,6 +18,9 @@ from .fields import AthenaFieldInfo +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + def chk23(strin): return strin.encode("utf-8") @@ -359,7 +363,9 @@ def _parse_index(self): gre_orig = self.ds.arr( np.round(gle_orig + dx * gdims[i], decimals=12), "code_length" ) - bbox = np.array([[le, re] for le, re in zip(gle_orig, gre_orig)]) + bbox = np.array( + [[le, re] for le, re in zip(gle_orig, gre_orig, strict=True)] + ) psize = get_psize(self.ds.domain_dimensions, self.ds.nprocs) gle, gre, shapes, slices, _ = decompose_array(gdims[i], psize, bbox) gle_all += gle @@ -432,7 +438,7 @@ def _reconstruct_parent_child(self): get_box_grids_level( self.grid_left_edge[i, :], self.grid_right_edge[i, :], - self.grid_levels[i] + 1, + self.grid_levels[i].item() + 1, self.grid_left_edge, self.grid_right_edge, self.grid_levels, diff --git a/yt/frontends/athena/io.py b/yt/frontends/athena/io.py index d002928736..b0045675ad 100644 --- a/yt/frontends/athena/io.py +++ b/yt/frontends/athena/io.py @@ -16,11 +16,6 @@ class IOHandlerAthena(BaseIOHandler): _data_string = "data:datatype=0" _read_table_offset = None - def _field_dict(self, fhandle): - keys = fhandle["field_types"].keys() - val = fhandle["field_types"].keys() - return dict(zip(keys, val)) - def _read_field_names(self, grid): pass diff --git a/yt/frontends/athena_pp/data_structures.py b/yt/frontends/athena_pp/data_structures.py index 37f6881205..8c7bffd1fa 100644 --- a/yt/frontends/athena_pp/data_structures.py +++ b/yt/frontends/athena_pp/data_structures.py @@ -1,4 +1,5 @@ import os +import sys import weakref import numpy as np @@ -15,6 +16,9 @@ from .fields import AthenaPPFieldInfo +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + geom_map = { "cartesian": "cartesian", "cylindrical": "cylindrical", @@ -215,7 +219,9 @@ def _parse_parameter_file(self): self._field_map = {} k = 0 for dname, num_var in zip( - self._handle.attrs["DatasetNames"], self._handle.attrs["NumVariables"] + self._handle.attrs["DatasetNames"], + self._handle.attrs["NumVariables"], + strict=True, ): for j in range(num_var): fname = self._handle.attrs["VariableNames"][k].decode("ascii", "ignore") diff --git a/yt/frontends/boxlib/_deprecation.py b/yt/frontends/boxlib/_deprecation.py new file mode 100644 index 0000000000..8303ed96bf --- /dev/null +++ b/yt/frontends/boxlib/_deprecation.py @@ -0,0 +1,13 @@ +from yt._maintenance.deprecation import issue_deprecation_warning, warnings + + +def boxlib_deprecation(): + with warnings.catch_warnings(): + warnings.simplefilter("always") + issue_deprecation_warning( + "The historic 'boxlib' frontend is \n" + "deprecated as it has been renamed 'amrex'. " + "Existing and future work should instead reference the 'amrex' frontend.", + stacklevel=4, + since="4.4.0", + ) diff --git a/yt/frontends/boxlib/api.py b/yt/frontends/boxlib/api.py index 334cc8bb63..a4005ca658 100644 --- a/yt/frontends/boxlib/api.py +++ b/yt/frontends/boxlib/api.py @@ -1,5 +1,6 @@ -from . 
import tests -from .data_structures import ( +from ..amrex import tests +from ..amrex import data_structures +from ..amrex.data_structures import ( AMReXDataset, AMReXHierarchy, BoxlibDataset, @@ -14,11 +15,11 @@ WarpXDataset, WarpXHierarchy, ) -from .fields import ( +from ..amrex.fields import ( BoxlibFieldInfo, CastroFieldInfo, MaestroFieldInfo, NyxFieldInfo, WarpXFieldInfo, ) -from .io import IOHandlerBoxlib +from ..amrex.io import IOHandlerBoxlib diff --git a/yt/frontends/boxlib/data_structures/__init__.py b/yt/frontends/boxlib/data_structures/__init__.py new file mode 100644 index 0000000000..253f1c6dc3 --- /dev/null +++ b/yt/frontends/boxlib/data_structures/__init__.py @@ -0,0 +1,18 @@ +from ...amrex.data_structures import ( + AMReXDataset, + AMReXHierarchy, + BoxlibDataset, + BoxlibGrid, + BoxlibHierarchy, + CastroDataset, + MaestroDataset, + NyxDataset, + NyxHierarchy, + OrionDataset, + OrionHierarchy, + WarpXDataset, + WarpXHierarchy, +) +from .._deprecation import boxlib_deprecation + +boxlib_deprecation() diff --git a/yt/frontends/boxlib/fields/__init__.py b/yt/frontends/boxlib/fields/__init__.py new file mode 100644 index 0000000000..4eaa65a9b2 --- /dev/null +++ b/yt/frontends/boxlib/fields/__init__.py @@ -0,0 +1,10 @@ +from ...amrex.fields import ( + BoxlibFieldInfo, + CastroFieldInfo, + MaestroFieldInfo, + NyxFieldInfo, + WarpXFieldInfo, +) +from .._deprecation import boxlib_deprecation + +boxlib_deprecation() diff --git a/yt/frontends/boxlib/io/__init__.py b/yt/frontends/boxlib/io/__init__.py new file mode 100644 index 0000000000..2c74949cfe --- /dev/null +++ b/yt/frontends/boxlib/io/__init__.py @@ -0,0 +1,4 @@ +from ...amrex.io import IOHandlerBoxlib +from .._deprecation import boxlib_deprecation + +boxlib_deprecation() diff --git a/yt/frontends/boxlib/tests/__init__.py b/yt/frontends/boxlib/tests/__init__.py index e69de29bb2..2ce3d37993 100644 --- a/yt/frontends/boxlib/tests/__init__.py +++ b/yt/frontends/boxlib/tests/__init__.py @@ -0,0 +1 @@ +from ...amrex import tests diff --git a/yt/frontends/boxlib/tests/test_boxlib_deprecation.py b/yt/frontends/boxlib/tests/test_boxlib_deprecation.py new file mode 100644 index 0000000000..c54e68133a --- /dev/null +++ b/yt/frontends/boxlib/tests/test_boxlib_deprecation.py @@ -0,0 +1,20 @@ +from importlib import import_module, reload + +from yt._maintenance.deprecation import warnings + + +def test_imports(): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + for index, mname in enumerate(["data_structures", "fields", "io"]): + mod_name = import_module("yt.frontends.boxlib." + mname) + if len(w) != index + 1: + reload(mod_name) + + assert len(w) == 3 and all( + [ + issubclass(w[0].category, DeprecationWarning), + issubclass(w[1].category, DeprecationWarning), + issubclass(w[2].category, DeprecationWarning), + ] + ) diff --git a/yt/frontends/cf_radial/data_structures.py b/yt/frontends/cf_radial/data_structures.py index 436517b300..1c1309510d 100644 --- a/yt/frontends/cf_radial/data_structures.py +++ b/yt/frontends/cf_radial/data_structures.py @@ -310,11 +310,14 @@ def _is_valid(cls, filename: str, *args, **kwargs) -> bool: nc4_file = NetCDF4FileHandler(filename) with nc4_file.open_ds(keepweakref=True) as ds: con = "Conventions" # the attribute to check for file conventions + # note that the attributes here are potentially space- or + # comma-delimited strings, so we concatenate a single string + # to search for a substring. 
cons = "" # the value of the Conventions attribute - for c in [con, con.lower()]: + for c in [con, con.lower(), "Sub_" + con.lower()]: if hasattr(ds, c): cons += getattr(ds, c) - is_cfrad = "CF/Radial" in cons + is_cfrad = "CF/Radial" in cons or "CF-Radial" in cons except (OSError, AttributeError, ImportError): return False diff --git a/yt/frontends/chombo/data_structures.py b/yt/frontends/chombo/data_structures.py index 8bffd1ab1a..df93c64cda 100644 --- a/yt/frontends/chombo/data_structures.py +++ b/yt/frontends/chombo/data_structures.py @@ -219,7 +219,7 @@ def _reconstruct_parent_child(self): get_box_grids_level( self.grid_left_edge[i, :], self.grid_right_edge[i, :], - self.grid_levels[i] + 1, + self.grid_levels[i].item() + 1, self.grid_left_edge, self.grid_right_edge, self.grid_levels, diff --git a/yt/frontends/chombo/io.py b/yt/frontends/chombo/io.py index a6f11d9a72..288761df70 100644 --- a/yt/frontends/chombo/io.py +++ b/yt/frontends/chombo/io.py @@ -1,4 +1,5 @@ import re +import sys import numpy as np @@ -6,6 +7,9 @@ from yt.utilities.io_handler import BaseIOHandler from yt.utilities.logger import ytLogger as mylog +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + class IOHandlerChomboHDF5(BaseIOHandler): _dataset_type = "chombo_hdf5" @@ -101,7 +105,9 @@ def _read_data(self, grid, field): stop = start + boxsize data = lev[self._data_string][start:stop] data_no_ghost = data.reshape(shape, order="F") - ghost_slice = tuple(slice(g, d + g, None) for g, d in zip(self.ghost, dims)) + ghost_slice = tuple( + slice(g, g + d) for g, d in zip(self.ghost, dims, strict=True) + ) ghost_slice = ghost_slice[0 : self.dim] return data_no_ghost[ghost_slice] diff --git a/yt/frontends/enzo/answer_testing_support.py b/yt/frontends/enzo/answer_testing_support.py index bba9dba062..bba14f7db1 100644 --- a/yt/frontends/enzo/answer_testing_support.py +++ b/yt/frontends/enzo/answer_testing_support.py @@ -1,4 +1,5 @@ import os +import sys from functools import wraps import numpy as np @@ -15,6 +16,9 @@ temp_cwd, ) +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + class AssertWrapper: """ @@ -102,7 +106,7 @@ def __call__(self): position = ad["index", "x"] for k in self.fields: field = ad[k].d - for xmin, xmax in zip(self.left_edges, self.right_edges): + for xmin, xmax in zip(self.left_edges, self.right_edges, strict=True): mask = (position >= xmin) * (position <= xmax) exact_field = np.interp(position[mask].ndview, exact["pos"], exact[k]) myname = f"ShockTubeTest_{k}" diff --git a/yt/frontends/enzo/data_structures.py b/yt/frontends/enzo/data_structures.py index 16f36b3305..ec28a10c54 100644 --- a/yt/frontends/enzo/data_structures.py +++ b/yt/frontends/enzo/data_structures.py @@ -1,6 +1,7 @@ import os import re import string +import sys import time import weakref from collections import defaultdict @@ -21,6 +22,9 @@ from .fields import EnzoFieldInfo +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + class EnzoGrid(AMRGridPatch): """ @@ -342,7 +346,7 @@ def _rebuild_top_grids(self, level=0): mylog.info("Finished rebuilding") def _populate_grid_objects(self): - for g, f in zip(self.grids, self.filenames): + for g, f in zip(self.grids, self.filenames, strict=True): g._prepare_grid() g._setup_dx() g.set_filename(f[0]) diff --git a/yt/frontends/enzo/simulation_handling.py b/yt/frontends/enzo/simulation_handling.py index 8733e40f84..5fb432650a 100644 --- a/yt/frontends/enzo/simulation_handling.py +++ 
b/yt/frontends/enzo/simulation_handling.py @@ -88,7 +88,7 @@ def _set_units(self): new_unit, self.unit_registry.lut[my_unit][0], dimensions.length, - "\\rm{%s}/(1+z)" % my_unit, + f"\\rm{{{my_unit}}}/(1+z)", prefixable=True, ) self.length_unit = self.quan( diff --git a/yt/frontends/enzo_e/data_structures.py b/yt/frontends/enzo_e/data_structures.py index a77c245d0e..b218f18163 100644 --- a/yt/frontends/enzo_e/data_structures.py +++ b/yt/frontends/enzo_e/data_structures.py @@ -408,7 +408,11 @@ def _parse_parameter_file(self): self.parameters["current_cycle"] = ablock.attrs["cycle"][0] gsi = ablock.attrs["enzo_GridStartIndex"] gei = ablock.attrs["enzo_GridEndIndex"] - self.ghost_zones = gsi[0] + assert len(gsi) == len(gei) == 3 # sanity check + # Enzo-E technically allows each axis to have different ghost zone + # depths (this feature is not really used in practice) + self.ghost_zones = gsi + assert (self.ghost_zones[self.dimensionality :] == 0).all() # sanity check self.root_block_dimensions = root_blocks self.active_grid_dimensions = gei - gsi + 1 self.grid_dimensions = ablock.attrs["enzo_GridDimension"] @@ -475,7 +479,7 @@ def _set_code_unit_attributes(self): setdefaultattr(self, "velocity_unit", self.quan(k["uvel"], "cm/s")) else: p = self.parameters - for d, u in zip(("length", "time"), ("cm", "s")): + for d, u in [("length", "cm"), ("time", "s")]: val = nested_dict_get(p, ("Units", d), default=1) setdefaultattr(self, f"{d}_unit", self.quan(val, u)) mass = nested_dict_get(p, ("Units", "mass")) diff --git a/yt/frontends/enzo_e/io.py b/yt/frontends/enzo_e/io.py index c2a8291b2b..1caf700b63 100644 --- a/yt/frontends/enzo_e/io.py +++ b/yt/frontends/enzo_e/io.py @@ -14,8 +14,17 @@ class EnzoEIOHandler(BaseIOHandler): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) + + # precompute the indexing specifying each field's active zone + # -> this assumes that each field in Enzo-E shares the same number of + # ghost-zones. Technically, Enzo-E allows each field to have a + # different number of ghost zones (but this feature isn't currently + # used and Enzo-E doesn't currently record this information) + # -> our usage of a negative stop value ensures compatibility with + # both cell-centered and face-centered fields - self._base = self.ds.dimensionality * ( - slice(self.ds.ghost_zones, -self.ds.ghost_zones), + self._activezone_idx = tuple( + slice(num_zones, -num_zones) if num_zones > 0 else slice(None) + for num_zones in self.ds.ghost_zones[: self.ds.dimensionality] + ) + # Determine if particle masses are actually masses or densities. 
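# --- aside: the active-zone slicing built above, exercised standalone ---
# slice(g, -g) trims g ghost zones from both ends of an axis, slice(None)
# keeps axes that have no ghost zones, and the negative stop is what keeps
# the same index valid for face-centered fields that are one cell longer.
# The shapes and ghost-zone depths below are made-up illustration values:
import numpy as np

ghost_zones = np.array([3, 3, 0])  # assumed per-axis depths for a 2D run
activezone_idx = tuple(
    slice(g, -g) if g > 0 else slice(None) for g in ghost_zones[:2]
)

cell_centered = np.zeros((14, 14))  # 8 active zones + 2 * 3 ghost zones per axis
face_centered = np.zeros((15, 14))  # one extra zone along the first axis
assert cell_centered[activezone_idx].shape == (8, 8)
assert face_centered[activezone_idx].shape == (9, 8)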
@@ -186,7 +195,7 @@ def _read_obj_field(self, obj, field, fid_data): dg.read(h5py.h5s.ALL, h5py.h5s.ALL, rdata) if close: fid.close() - data = rdata[self._base].T + data = rdata[self._activezone_idx].T if self.ds.dimensionality < 3: nshape = data.shape + (1,) * (3 - self.ds.dimensionality) data = np.reshape(data, nshape) diff --git a/yt/frontends/enzo_e/misc.py b/yt/frontends/enzo_e/misc.py index a1528b6963..c95de312cb 100644 --- a/yt/frontends/enzo_e/misc.py +++ b/yt/frontends/enzo_e/misc.py @@ -1,6 +1,11 @@ +import sys + import numpy as np from more_itertools import always_iterable +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + def bdecode(block): """ @@ -89,7 +94,7 @@ def get_root_block_id(block, min_dim=3): def get_child_index(anc_id, desc_id): cid = "" - for aind, dind in zip(anc_id.split("_"), desc_id.split("_")): + for aind, dind in zip(anc_id.split("_"), desc_id.split("_"), strict=True): cid += dind[len(aind)] cid = int(cid, 2) return cid @@ -100,7 +105,7 @@ def is_parent(anc_block, desc_block): if (len(desc_block.replace(":", "")) - len(anc_block.replace(":", ""))) / dim != 1: return False - for aind, dind in zip(anc_block.split("_"), desc_block.split("_")): + for aind, dind in zip(anc_block.split("_"), desc_block.split("_"), strict=True): if not dind.startswith(aind): return False return True diff --git a/yt/frontends/exodus_ii/data_structures.py b/yt/frontends/exodus_ii/data_structures.py index 52ef9a0c66..7ff0846557 100644 --- a/yt/frontends/exodus_ii/data_structures.py +++ b/yt/frontends/exodus_ii/data_structures.py @@ -1,3 +1,5 @@ +import sys + import numpy as np from yt.data_objects.index_subobjects.unstructured_mesh import UnstructuredMesh @@ -11,6 +13,9 @@ from .fields import ExodusIIFieldInfo from .util import get_num_pseudo_dims, load_info_records, sanitize_string +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + class ExodusIIUnstructuredMesh(UnstructuredMesh): _index_offset = 1 @@ -220,7 +225,7 @@ def _read_glo_var(self): return with self._handle.open_ds() as ds: values = ds.variables["vals_glo_var"][:].transpose() - for name, value in zip(names, values): + for name, value in zip(names, values, strict=True): self.parameters[name] = value def _load_info_records(self): diff --git a/yt/frontends/exodus_ii/io.py b/yt/frontends/exodus_ii/io.py index b9dbb81e68..ce8b32210a 100644 --- a/yt/frontends/exodus_ii/io.py +++ b/yt/frontends/exodus_ii/io.py @@ -1,8 +1,13 @@ +import sys + import numpy as np from yt.utilities.file_handler import NetCDF4FileHandler from yt.utilities.io_handler import BaseIOHandler +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + class IOHandlerExodusII(BaseIOHandler): _particle_reader = False @@ -69,7 +74,7 @@ def _read_fluid_selection(self, chunks, selector, fields, size): ind += g.select(selector, data, rv[field], ind) # caches if fname in self.elem_fields: field_ind = self.elem_fields.index(fname) - for g, mesh_id in zip(objs, mesh_ids): + for g, mesh_id in zip(objs, mesh_ids, strict=True): fdata = ds.variables[ "vals_elem_var%deb%s" % (field_ind + 1, mesh_id) ][:] diff --git a/yt/frontends/fits/fields.py b/yt/frontends/fits/fields.py index bc09189ade..2b6260b731 100644 --- a/yt/frontends/fits/fields.py +++ b/yt/frontends/fits/fields.py @@ -70,10 +70,10 @@ def _world_f(field, data): return _world_f - for (i, axis), name in zip( - enumerate([self.ds.lon_axis, self.ds.lat_axis]), - [self.ds.lon_name, self.ds.lat_name], - ): + for i, axis, name in [ + (0, 
self.ds.lon_axis, self.ds.lon_name), + (1, self.ds.lat_axis, self.ds.lat_name), + ]: unit = str(wcs_2d.wcs.cunit[i]) if unit.lower() == "deg": unit = "degree" diff --git a/yt/frontends/fits/misc.py b/yt/frontends/fits/misc.py index 2da062b203..8362c6fa81 100644 --- a/yt/frontends/fits/misc.py +++ b/yt/frontends/fits/misc.py @@ -287,6 +287,6 @@ def _repr_html_(self): img = base64.b64encode(f.read()).decode() ret += ( r'
<img style="max-width:100%;max-height:100%;" ' + rf'src="data:image/png;base64,{img}"><br>
' ) return ret diff --git a/yt/frontends/flash/data_structures.py b/yt/frontends/flash/data_structures.py index 7d617f07a1..6456e22984 100644 --- a/yt/frontends/flash/data_structures.py +++ b/yt/frontends/flash/data_structures.py @@ -1,4 +1,5 @@ import os +import sys import weakref import numpy as np @@ -15,6 +16,9 @@ from .fields import FLASHFieldInfo +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + class FLASHGrid(AMRGridPatch): _id_offset = 1 @@ -246,7 +250,7 @@ def _set_code_unit_attributes(self): else: raise RuntimeError( "Runtime parameter unitsystem with " - "value %s is unrecognized" % self["unitsystem"] + f"value {self['unitsystem']} is unrecognized" ) else: b_factor = 1.0 @@ -274,7 +278,9 @@ def _find_parameter(self, ptype, pname, scalar=False): if nn not in self._handle: raise KeyError(nn) for tpname, pval in zip( - self._handle[nn][:, "name"], self._handle[nn][:, "value"] + self._handle[nn][:, "name"], + self._handle[nn][:, "value"], + strict=True, ): if tpname.decode("ascii", "ignore").strip() == pname: if hasattr(pval, "decode"): @@ -306,7 +312,9 @@ def _parse_parameter_file(self): if hn not in self._handle: continue for varname, val in zip( - self._handle[hn][:, "name"], self._handle[hn][:, "value"] + self._handle[hn][:, "name"], + self._handle[hn][:, "value"], + strict=True, ): vn = varname.strip() if hn.startswith("string"): @@ -333,7 +341,9 @@ def _parse_parameter_file(self): ) else: zipover = zip( - self._handle[hn][:, "name"], self._handle[hn][:, "value"] + self._handle[hn][:, "name"], + self._handle[hn][:, "value"], + strict=True, ) for varname, val in zipover: vn = varname.strip() diff --git a/yt/frontends/gadget/io.py b/yt/frontends/gadget/io.py index bdf3c263cd..cc10d34842 100644 --- a/yt/frontends/gadget/io.py +++ b/yt/frontends/gadget/io.py @@ -1,4 +1,5 @@ import os +import sys from collections import defaultdict from functools import cached_property @@ -12,6 +13,9 @@ from .definitions import SNAP_FORMAT_2_OFFSET, gadget_hdf5_ptypes +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + class IOHandlerGadgetHDF5(IOHandlerSPH): _dataset_type = "gadget_hdf5" @@ -509,7 +513,7 @@ def _calculate_field_offsets( pos = offset fs = self._field_size offsets = {} - pcount = dict(zip(self._ptypes, pcount)) + pcount = dict(zip(self._ptypes, pcount, strict=True)) for field in self._fields: if field == "ParticleIDs" and self.ds.long_ids: diff --git a/yt/frontends/gadget/simulation_handling.py b/yt/frontends/gadget/simulation_handling.py index 505382aeca..b53c08ec89 100644 --- a/yt/frontends/gadget/simulation_handling.py +++ b/yt/frontends/gadget/simulation_handling.py @@ -81,7 +81,7 @@ def _set_units(self): new_unit, self.unit_registry.lut[my_unit][0], dimensions.length, - "\\rm{%s}/(1+z)" % my_unit, + f"\\rm{{{my_unit}}}/(1+z)", prefixable=True, ) self.length_unit = self.quan( diff --git a/yt/frontends/gadget/testing.py b/yt/frontends/gadget/testing.py index 349b901769..f37aa01ae9 100644 --- a/yt/frontends/gadget/testing.py +++ b/yt/frontends/gadget/testing.py @@ -1,9 +1,14 @@ +import sys + import numpy as np from .data_structures import GadgetBinaryHeader, GadgetDataset from .definitions import gadget_field_specs, gadget_ptype_specs from .io import IOHandlerGadgetBinary +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + vector_fields = dict(IOHandlerGadgetBinary._vector_fields) block_ids = { @@ -72,7 +77,7 @@ def fake_gadget_binary( header["HubbleParam"] = 1 write_block(fp, header, endian, fmt, 
"HEAD") - npart = dict(zip(ptype_spec, npart)) + npart = dict(zip(ptype_spec, npart, strict=True)) for fs in field_spec: # Parse field name and particle type if isinstance(fs, str): diff --git a/yt/frontends/gdf/data_structures.py b/yt/frontends/gdf/data_structures.py index 1bb7a88b66..0817c2aa1b 100644 --- a/yt/frontends/gdf/data_structures.py +++ b/yt/frontends/gdf/data_structures.py @@ -115,7 +115,7 @@ def _populate_grid_objects(self): get_box_grids_level( self.grid_left_edge[gi, :], self.grid_right_edge[gi, :], - self.grid_levels[gi], + self.grid_levels[gi].item(), self.grid_left_edge, self.grid_right_edge, self.grid_levels, diff --git a/yt/frontends/halo_catalog/io.py b/yt/frontends/halo_catalog/io.py index c22b43407b..97a4672853 100644 --- a/yt/frontends/halo_catalog/io.py +++ b/yt/frontends/halo_catalog/io.py @@ -105,7 +105,7 @@ def _read_particle_fields(self, dobj, ptf): _read_particle_selection = IOHandlerGadgetFOFHaloHDF5._read_particle_selection -# ignoring type in this mixing to circunvent this error from mypy +# ignoring type in this mixing to circumvent this error from mypy # Definition of "_read_particle_fields" in base class "HaloDatasetIOHandler" # is incompatible with definition in base class "IOHandlerYTHaloCatalog" # diff --git a/yt/frontends/halo_catalog/tests/test_outputs.py b/yt/frontends/halo_catalog/tests/test_outputs.py index 21ecd9dec0..3e56217ddb 100644 --- a/yt/frontends/halo_catalog/tests/test_outputs.py +++ b/yt/frontends/halo_catalog/tests/test_outputs.py @@ -1,3 +1,5 @@ +import sys + import numpy as np from numpy.testing import assert_array_equal, assert_equal @@ -8,6 +10,9 @@ from yt.units.yt_array import YTArray, YTQuantity from yt.utilities.answer_testing.framework import data_dir_load +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + def fake_halo_catalog(data): filename = "catalog.0.h5" @@ -38,7 +43,7 @@ def test_halo_catalog(self): units = ["g"] + ["cm"] * 3 data = { field: YTArray(rs.random_sample(n_halos), unit) - for field, unit in zip(fields, units) + for field, unit in zip(fields, units, strict=True) } fn = fake_halo_catalog(data) @@ -61,7 +66,7 @@ def test_halo_catalog_boundary_particles(self): units = ["g"] + ["cm"] * 3 data = { field: YTArray(rs.random_sample(n_halos), unit) - for field, unit in zip(fields, units) + for field, unit in zip(fields, units, strict=True) } data["particle_position_x"][0] = 1.0 diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 97cc7c5af2..851cf29b88 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -417,7 +417,7 @@ def __init__( elif num_ghost_zones < 0: raise RuntimeError( "Cannot initialize a domain subset with a negative number " - "of ghost zones, was called with num_ghost_zones=%s" % num_ghost_zones + f"of ghost zones, was called with {num_ghost_zones=}" ) @property @@ -1038,8 +1038,8 @@ def caster(val): if rheader["ordering type"] != "hilbert" and self._bbox is not None: raise NotImplementedError( - "The ordering %s is not compatible with the `bbox` argument." - % rheader["ordering type"] + f"The ordering {rheader['ordering type']} " + "is not compatible with the `bbox` argument." 
) self.parameters.update(rheader) self.domain_left_edge = np.zeros(3, dtype="float64") diff --git a/yt/frontends/ramses/fields.py b/yt/frontends/ramses/fields.py index dca50a6282..4c871ec08b 100644 --- a/yt/frontends/ramses/fields.py +++ b/yt/frontends/ramses/fields.py @@ -1,10 +1,12 @@ import os +import warnings from functools import partial import numpy as np from yt import units from yt._typing import KnownFieldsT +from yt.fields.field_detector import FieldDetector from yt.fields.field_info_container import FieldInfoContainer from yt.frontends.ramses.io import convert_ramses_conformal_time_to_physical_age from yt.utilities.cython_fortran_utils import FortranFile @@ -196,18 +198,45 @@ def star_age(field, data): ) def setup_fluid_fields(self): - def _temperature(field, data): + def _temperature_over_mu(field, data): rv = data["gas", "pressure"] / data["gas", "density"] rv *= mass_hydrogen_cgs / boltzmann_constant_cgs return rv + self.add_field( + ("gas", "temperature_over_mu"), + sampling_type="cell", + function=_temperature_over_mu, + units=self.ds.unit_system["temperature"], + ) + found_cooling_fields = self.create_cooling_fields() + + if found_cooling_fields: + + def _temperature(field, data): + return data["gas", "temperature_over_mu"] * data["gas", "mu"] + + else: + + def _temperature(field, data): + if not isinstance(data, FieldDetector): + warnings.warn( + "Trying to calculate temperature but the cooling tables " + "couldn't be found or read. yt will return T/µ instead of " + "T — this is equivalent to assuming µ=1.0. To suppress this, " + "derive the temperature from temperature_over_mu with " + "some values for mu.", + category=RuntimeWarning, + stacklevel=1, + ) + return data["gas", "temperature_over_mu"] + self.add_field( ("gas", "temperature"), sampling_type="cell", function=_temperature, units=self.ds.unit_system["temperature"], ) - self.create_cooling_fields() self.species_names = [ known_species_names[fn] @@ -373,7 +402,8 @@ def _photon_flux(field, data): units=flux_unit, ) - def create_cooling_fields(self): + def create_cooling_fields(self) -> bool: + "Create cooling fields from the cooling files. Return True if successful." num = os.path.basename(self.ds.parameter_filename).split(".")[0].split("_")[1] filename = "%s/cooling_%05i.out" % ( os.path.dirname(self.ds.parameter_filename), @@ -382,15 +412,15 @@ def create_cooling_fields(self): if not os.path.exists(filename): mylog.warning("This output has no cooling fields") - return + return False # Function to create the cooling fields def _create_field(name, interp_object, unit): def _func(field, data): - shape = data["gas", "temperature"].shape + shape = data["gas", "temperature_over_mu"].shape d = { "lognH": np.log10(_X * data["gas", "density"] / mh).ravel(), - "logT": np.log10(data["gas", "temperature"]).ravel(), + "logT": np.log10(data["gas", "temperature_over_mu"]).ravel(), } rv = interp_object(d).reshape(shape) if name[-1] != "mu": @@ -425,7 +455,7 @@ def _func(field, data): "This cooling file format is no longer supported. " "Cooling field loading skipped." 
) - return + return False if var.size == n1 * n2: tvals[tname] = { "data": var.reshape((n1, n2), order="F"), @@ -446,7 +476,7 @@ def _func(field, data): ["lognH", "logT"], truncate=True, ) - _create_field(("gas", "mu"), interp, tvals["mu"]["unit"]) + _create_field(("gas", "mu"), interp, "dimensionless") # Add the number density field, based on mu def _number_density(field, data): @@ -504,3 +534,5 @@ def _net_cool(field, data): function=_net_cool, units=cooling_function_units, ) + + return True diff --git a/yt/frontends/ramses/io.py b/yt/frontends/ramses/io.py index f39dd84b22..546ba333f5 100644 --- a/yt/frontends/ramses/io.py +++ b/yt/frontends/ramses/io.py @@ -1,7 +1,6 @@ -import os from collections import defaultdict from functools import lru_cache -from typing import Union +from typing import TYPE_CHECKING, Union import numpy as np @@ -17,6 +16,9 @@ from yt.utilities.logger import ytLogger as mylog from yt.utilities.physical_ratios import cm_per_km, cm_per_mpc +if TYPE_CHECKING: + import os + def convert_ramses_ages(ds, conformal_ages): issue_deprecation_warning( diff --git a/yt/frontends/ramses/io_utils.pyx b/yt/frontends/ramses/io_utils.pyx index 69368b086c..643099a170 100644 --- a/yt/frontends/ramses/io_utils.pyx +++ b/yt/frontends/ramses/io_utils.pyx @@ -192,6 +192,9 @@ def fill_hydro(FortranFile f, cdef int jump_len, Ncells cdef np.uint8_t[::1] mask_level = np.zeros(nlevels, dtype=np.uint8) + # First, make sure fields are in the same order + fields = sorted(fields, key=lambda f: all_fields.index(f)) + # The ordering is very important here, as we'll write directly into the memory # address the content of the files. cdef np.float64_t[::1, :, :] buffer diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index 42d2fb9c11..7cfe3ffad1 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -266,7 +266,10 @@ def build_iterator(): if record_len != exp_len: # Guess record vtype from length nbytes = record_len // hvals["npart"] - vtype = _default_dtypes[nbytes] + # NOTE: in some simulations (e.g. New Horizon), the record length is not + # a multiple of 1, 2, 4 or 8. In this case, fall back to assuming + # double precision. 
+ vtype = _default_dtypes.get(nbytes, "d") mylog.warning( "Supposed that `%s` has type %s given record size", diff --git a/yt/frontends/ramses/tests/test_hilbert.py b/yt/frontends/ramses/tests/test_hilbert.py index 55387c7e14..c4a04cc91e 100644 --- a/yt/frontends/ramses/tests/test_hilbert.py +++ b/yt/frontends/ramses/tests/test_hilbert.py @@ -1,3 +1,5 @@ +import sys + import numpy as np from numpy.testing import assert_equal @@ -5,6 +7,9 @@ from yt.frontends.ramses.hilbert import get_cpu_list_cuboid, hilbert3d from yt.testing import requires_file +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + def test_hilbert3d(): # 8 different cases, checked against RAMSES' own implementation ] outputs = [0, 1, 7, 6, 3, 2, 4, 5] - for i, o in zip(inputs, outputs): + for i, o in zip(inputs, outputs, strict=True): assert_equal(hilbert3d(i, 3).item(), o) @@ -48,7 +53,7 @@ def test_get_cpu_list(): + [ds.hilbert_indices[ds.parameters["ncpu"]][1]], dtype="float64", ) - for i, o in zip(inputs, outputs): + for i, o in zip(inputs, outputs, strict=True): bbox = i ls = list(get_cpu_list_cuboid(ds, bbox, bound_keys=bound_keys)) assert len(ls) > 0 diff --git a/yt/frontends/ramses/tests/test_outputs.py b/yt/frontends/ramses/tests/test_outputs.py index a1ed589995..064c57b78f 100644 --- a/yt/frontends/ramses/tests/test_outputs.py +++ b/yt/frontends/ramses/tests/test_outputs.py @@ -644,3 +644,42 @@ def test_print_stats(): ds.print_stats() # FIXME #3197: use `capsys` with pytest to make sure the print_stats function works as intended + + +@requires_file(output_00080) +def test_reading_order(): + # This checks the bug uncovered in #4880, + # namely that the result of field access doesn't + # depend on the access order + + def _dummy_field(field, data): + # Note: this is a dummy field + # that doesn't really have any physical meaning + # but may trigger some bug in the field + # handling. + T = data["gas", "temperature"] + Z = data["gas", "metallicity"] + return T * 1**Z + + fields = [ + "Density", + "x-velocity", + "y-velocity", + "z-velocity", + "Pressure", + "Metallicity", + ] + ds = yt.load(output_00080, fields=fields) + + ds.add_field( + ("gas", "test"), function=_dummy_field, units=None, sampling_type="cell" + ) + + ad = ds.all_data() + v0 = ad["gas", "test"] + + ad = ds.all_data() + ad["gas", "temperature"] + v1 = ad["gas", "test"] + + np.testing.assert_allclose(v0, v1) diff --git a/yt/frontends/ramses/tests/test_outputs_pytest.py b/yt/frontends/ramses/tests/test_outputs_pytest.py index 40b100fe5d..668c78e2d0 100644 --- a/yt/frontends/ramses/tests/test_outputs_pytest.py +++ b/yt/frontends/ramses/tests/test_outputs_pytest.py @@ -55,3 +55,28 @@ def test_field_config_2(custom_ramses_fields_conf): assert ("ramses", f) in ds.field_list for f in custom_grav: assert ("gravity", f) in ds.field_list + + +@requires_file(output_00080) +@requires_file(ramses_new_format) +def test_warning_T2(): + ds1 = yt.load(output_00080) + ds2 = yt.load(ramses_new_format) + + # Should not raise warnings + ds1.r["gas", "temperature_over_mu"] + ds2.r["gas", "temperature_over_mu"] + + # We cannot read the cooling tables of output_00080 + # so this should raise a warning + with pytest.warns( + RuntimeWarning, + match=( + "Trying to calculate temperature but the cooling tables couldn't be " + r"found or read\. 
yt will return T/µ instead of T.*" + ), + ): + ds1.r["gas", "temperature"] + + # But this one should not + ds2.r["gas", "temperature"] diff --git a/yt/frontends/rockstar/definitions.py b/yt/frontends/rockstar/definitions.py index 7e6753f830..578f29b77e 100644 --- a/yt/frontends/rockstar/definitions.py +++ b/yt/frontends/rockstar/definitions.py @@ -108,7 +108,7 @@ if len(item) == 2: halo_dts_tmp[rev].append(item) elif len(item) == 3: - mi, ma = item[2] # type: ignore + mi, ma = item[2] if (mi <= rev) and (rev <= ma): halo_dts_tmp[rev].append(item[:2]) halo_dts[rev] = np.dtype(halo_dts_tmp[rev], align=True) diff --git a/yt/frontends/sph/data_structures.py b/yt/frontends/sph/data_structures.py index 7d0e9c696d..3c65b7171d 100644 --- a/yt/frontends/sph/data_structures.py +++ b/yt/frontends/sph/data_structures.py @@ -59,8 +59,8 @@ def sph_smoothing_style(self): def sph_smoothing_style(self, value): if value not in self._sph_smoothing_styles: raise ValueError( - "Smoothing style not implemented: %s, please " - "select one of the following: " % value, + f"Smoothing style not implemented: {value}, " + "please select one of the following: ", self._sph_smoothing_styles, ) diff --git a/yt/frontends/stream/data_structures.py b/yt/frontends/stream/data_structures.py index f1d4db1b13..3f077377a4 100644 --- a/yt/frontends/stream/data_structures.py +++ b/yt/frontends/stream/data_structures.py @@ -1,4 +1,5 @@ import os +import sys import time import uuid import weakref @@ -46,6 +47,9 @@ from .definitions import process_data, set_particle_types from .fields import StreamFieldInfo +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + class StreamGrid(AMRGridPatch): """ @@ -240,7 +244,7 @@ def _reconstruct_parent_child(self): get_box_grids_level( self.grid_left_edge[i, :], self.grid_right_edge[i, :], - self.grid_levels[i] + 1, + self.grid_levels[i].item() + 1, self.grid_left_edge, self.grid_right_edge, self.grid_levels, @@ -422,7 +426,7 @@ def _set_code_unit_attributes(self): "magnetic_unit", ) cgs_units = ("cm", "g", "s", "cm/s", "gauss") - for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units): + for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units, strict=True): if isinstance(unit, str): if unit == "code_magnetic": # If no magnetic unit was explicitly specified diff --git a/yt/frontends/stream/definitions.py b/yt/frontends/stream/definitions.py index 63657231ab..ae0df1f87e 100644 --- a/yt/frontends/stream/definitions.py +++ b/yt/frontends/stream/definitions.py @@ -1,3 +1,4 @@ +import sys from collections import defaultdict import numpy as np @@ -13,6 +14,9 @@ from .fields import StreamFieldInfo +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + def assign_particle_data(ds, pdata, bbox): """ @@ -135,7 +139,7 @@ def assign_particle_data(ds, pdata, bbox): else: grid_pdata = [pdata] - for pd, gi in zip(grid_pdata, sorted(ds.stream_handler.fields)): + for pd, gi in zip(grid_pdata, sorted(ds.stream_handler.fields), strict=True): ds.stream_handler.fields[gi].update(pd) ds.stream_handler.particle_types.update(set_particle_types(pd)) npart = ds.stream_handler.fields[gi].pop("number_of_particles", 0) diff --git a/yt/frontends/swift/data_structures.py b/yt/frontends/swift/data_structures.py index 95bf9a8371..39d881c73c 100644 --- a/yt/frontends/swift/data_structures.py +++ b/yt/frontends/swift/data_structures.py @@ -2,11 +2,12 @@ from yt.data_objects.static_output import ParticleFile from yt.frontends.sph.data_structures import SPHDataset, 
SPHParticleIndex -from yt.frontends.sph.fields import SPHFieldInfo from yt.funcs import only_on_root from yt.utilities.logger import ytLogger as mylog from yt.utilities.on_demand_imports import _h5py as h5py +from .fields import SwiftFieldInfo + class SwiftParticleFile(ParticleFile): pass @@ -15,7 +16,7 @@ class SwiftParticleFile(ParticleFile): class SwiftDataset(SPHDataset): _load_requirements = ["h5py"] _index_class = SPHParticleIndex - _field_info_class = SPHFieldInfo + _field_info_class = SwiftFieldInfo _file_class = SwiftParticleFile _particle_mass_name = "Masses" @@ -97,7 +98,15 @@ def _parse_parameter_file(self): # Read from the HDF5 file, this gives us all the info we need. The rest # of this function is just parsing. header = self._get_info_attributes("Header") - runtime_parameters = self._get_info_attributes("RuntimePars") + # RuntimePars were removed from snapshots at SWIFT commit 6271388 + # between SWIFT versions 0.8.5 and 0.9.0 + with h5py.File(self.filename, mode="r") as handle: + has_runtime_pars = "RuntimePars" in handle.keys() + + if has_runtime_pars: + runtime_parameters = self._get_info_attributes("RuntimePars") + else: + runtime_parameters = {} policy = self._get_info_attributes("Policy") # These are the parameterfile parameters from *.yml at runtime @@ -113,7 +122,10 @@ def _parse_parameter_file(self): self.dimensionality = int(header["Dimension"]) # SWIFT is either all periodic, or not periodic at all - periodic = int(runtime_parameters["PeriodicBoundariesOn"]) + if has_runtime_pars: + periodic = int(runtime_parameters["PeriodicBoundariesOn"]) + else: + periodic = int(parameters["InitialConditions:periodic"]) if periodic: self._periodicity = [True] * self.dimensionality @@ -131,7 +143,14 @@ def _parse_parameter_file(self): self.current_redshift = float(header["Redshift"]) # These won't be present if self.cosmological_simulation is false self.omega_lambda = float(parameters["Cosmology:Omega_lambda"]) - self.omega_matter = float(parameters["Cosmology:Omega_m"]) + # Cosmology:Omega_m parameter deprecated at SWIFT commit d2783c2 + # Between SWIFT versions 0.9.0 and 1.0.0 + if "Cosmology:Omega_cdm" in parameters: + self.omega_matter = float(parameters["Cosmology:Omega_b"]) + float( + parameters["Cosmology:Omega_cdm"] + ) + else: + self.omega_matter = float(parameters["Cosmology:Omega_m"]) # This is "little h" self.hubble_constant = float(parameters["Cosmology:h"]) except KeyError: @@ -155,9 +174,10 @@ def _parse_parameter_file(self): # Store the un-parsed information should people want it. 
self.parameters = { "header": header, - "runtime_parameters": runtime_parameters, "policy": policy, "parameters": parameters, + # NOTE: runtime_parameters may be empty + "runtime_parameters": runtime_parameters, "hydro": hydro, "subgrid": subgrid, } diff --git a/yt/frontends/swift/fields.py b/yt/frontends/swift/fields.py new file mode 100644 index 0000000000..19ec84822c --- /dev/null +++ b/yt/frontends/swift/fields.py @@ -0,0 +1,27 @@ +from yt.frontends.sph.fields import SPHFieldInfo + + +class SwiftFieldInfo(SPHFieldInfo): + def __init__(self, ds, field_list, slice_info=None): + self.known_particle_fields += ( + ( + "InternalEnergies", + ("code_specific_energy", ["specific_thermal_energy"], None), + ), + ("Densities", ("code_mass / code_length**3", ["density"], None)), + ("SmoothingLengths", ("code_length", ["smoothing_length"], None)), + ) + super().__init__(ds, field_list, slice_info) + + def setup_particle_fields(self, ptype, *args, **kwargs): + super().setup_particle_fields(ptype, *args, **kwargs) + + if ptype in ("PartType0", "Gas"): + self.setup_gas_particle_fields(ptype) + + def setup_gas_particle_fields(self, ptype): + self.alias((ptype, "temperature"), (ptype, "Temperatures")) + self.alias(("gas", "temperature"), (ptype, "Temperatures")) + + for ax in ("x", "y", "z"): + self.alias((ptype, ax), (ptype, "particle_position_" + ax)) diff --git a/yt/frontends/swift/io.py b/yt/frontends/swift/io.py index 2cd07d01d9..439176e480 100644 --- a/yt/frontends/swift/io.py +++ b/yt/frontends/swift/io.py @@ -65,8 +65,14 @@ def _get_smoothing_length(self, sub_file, pdtype=None, pshape=None): with h5py.File(sub_file.filename, mode="r") as f: pcount = f["/Header"].attrs["NumPart_ThisFile"][ind].astype("int64") pcount = np.clip(pcount - si, 0, ei - si) + keys = f[ptype].keys() + # SWIFT commit a94cc81 changed from "SmoothingLength" to "SmoothingLengths" + # between SWIFT versions 0.8.2 and 0.8.3 + if "SmoothingLengths" in keys: + hsml = f[ptype]["SmoothingLengths"][si:ei, ...] + else: + hsml = f[ptype]["SmoothingLength"][si:ei, ...] # we upscale to float64 - hsml = f[ptype]["SmoothingLength"][si:ei, ...] hsml = hsml.astype("float64", copy=False) return hsml diff --git a/yt/frontends/swift/tests/test_outputs.py b/yt/frontends/swift/tests/test_outputs.py index 1aa61284dc..d6dccb41e2 100644 --- a/yt/frontends/swift/tests/test_outputs.py +++ b/yt/frontends/swift/tests/test_outputs.py @@ -64,7 +64,7 @@ def test_non_cosmo_dataset_selection(): @requires_file(EAGLE_6) def test_cosmo_dataset(): ds = load(EAGLE_6) - assert type(ds) == SwiftDataset + assert type(ds) is SwiftDataset field = ("gas", "density") ad = ds.all_data() diff --git a/yt/frontends/tipsy/data_structures.py b/yt/frontends/tipsy/data_structures.py index 70d3e1cb77..7050bd4ac9 100644 --- a/yt/frontends/tipsy/data_structures.py +++ b/yt/frontends/tipsy/data_structures.py @@ -1,6 +1,7 @@ import glob import os import struct +import sys import numpy as np @@ -12,6 +13,9 @@ from .fields import TipsyFieldInfo +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + class TipsyFile(ParticleFile): def __init__(self, ds, io, filename, file_id, range=None): @@ -118,11 +122,13 @@ def _parse_parameter_file(self): # the snapshot time and particle counts. 
f = open(self.parameter_filename, "rb") - hh = self.endian + "".join("%s" % (b) for a, b in self._header_spec) + hh = self.endian + "".join(str(b) for a, b in self._header_spec) hvals = { a: c for (a, b), c in zip( - self._header_spec, struct.unpack(hh, f.read(struct.calcsize(hh))) + self._header_spec, + struct.unpack(hh, f.read(struct.calcsize(hh))), + strict=True, ) } self.parameters.update(hvals) diff --git a/yt/frontends/tipsy/io.py b/yt/frontends/tipsy/io.py index 38896a5004..044932fa58 100644 --- a/yt/frontends/tipsy/io.py +++ b/yt/frontends/tipsy/io.py @@ -1,6 +1,7 @@ import glob import os import struct +import sys import numpy as np @@ -9,6 +10,9 @@ from yt.utilities.lib.particle_kdtree_tools import generate_smoothing_length from yt.utilities.logger import ytLogger as mylog +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + class IOHandlerTipsyBinary(IOHandlerSPH): _dataset_type = "tipsy" @@ -54,7 +58,10 @@ def _fill_fields(self, fields, vals, hsml, mask, data_file): if mask is None: size = 0 elif isinstance(mask, slice): - size = vals[fields[0]].size + if fields[0] == "smoothing_length": + size = hsml.size + else: + size = vals[fields[0]].size else: size = mask.sum() rv = {} @@ -157,9 +164,12 @@ def _read_particle_data_file(self, data_file, ptf, selector=None): f = open(data_file.filename, "rb") # we need to open all aux files for chunking to work - aux_fh = {} - for afield in self._aux_fields: - aux_fh[afield] = open(data_file.filename + "." + afield, "rb") + _aux_fh = {} + + def aux_fh(afield): + if afield not in _aux_fh: + _aux_fh[afield] = open(data_file.filename + "." + afield, "rb") + return _aux_fh[afield] for ptype, field_list in sorted(ptf.items(), key=lambda a: poff.get(a[0], -1)): if data_file.total_particles[ptype] == 0: @@ -170,19 +180,19 @@ def _read_particle_data_file(self, data_file, ptf, selector=None): p = np.fromfile(f, self._pdtypes[ptype], count=count) auxdata = [] for afield in afields: - aux_fh[afield].seek(aux_fields_offsets[afield][ptype]) + aux_fh(afield).seek(aux_fields_offsets[afield][ptype]) if isinstance(self._aux_pdtypes[afield], np.dtype): auxdata.append( np.fromfile( - aux_fh[afield], self._aux_pdtypes[afield], count=count + aux_fh(afield), self._aux_pdtypes[afield], count=count ) ) else: - aux_fh[afield].seek(0) + aux_fh(afield).seek(0) sh = aux_fields_offsets[afield][ptype] if tp[ptype] > 0: aux = np.genfromtxt( - aux_fh[afield], skip_header=sh, max_rows=count + aux_fh(afield), skip_header=sh, max_rows=count ) if aux.ndim < 1: aux = np.array([aux]) @@ -209,7 +219,7 @@ def _read_particle_data_file(self, data_file, ptf, selector=None): # close all file handles f.close() - for fh in list(aux_fh.values()): + for fh in _aux_fh.values(): fh.close() return return_data @@ -322,7 +332,7 @@ def _count_particles(self, data_file): if None not in (si, ei): np.clip(pcount - si, 0, ei - si, out=pcount) ptypes = ["Gas", "Stars", "DarkMatter"] - npart = dict(zip(ptypes, pcount)) + npart = dict(zip(ptypes, pcount, strict=True)) return npart @classmethod diff --git a/yt/frontends/tipsy/tests/test_outputs.py b/yt/frontends/tipsy/tests/test_outputs.py index 7b7667b21f..73e8d6201b 100644 --- a/yt/frontends/tipsy/tests/test_outputs.py +++ b/yt/frontends/tipsy/tests/test_outputs.py @@ -112,3 +112,9 @@ def test_tipsy_index(): ds = data_dir_load(tipsy_gal) sl = ds.slice("z", 0.0) assert sl["gas", "density"].shape[0] != 0 + + +@requires_file(tipsy_gal) +def test_tipsy_smoothing_length(): + ds = data_dir_load(tipsy_gal) + _ = ds.all_data()["Gas", 
"smoothing_length"] diff --git a/yt/frontends/ytdata/data_structures.py b/yt/frontends/ytdata/data_structures.py index 6e8e081915..46009b4a61 100644 --- a/yt/frontends/ytdata/data_structures.py +++ b/yt/frontends/ytdata/data_structures.py @@ -1,4 +1,5 @@ import os +import sys import weakref from collections import defaultdict from functools import cached_property @@ -33,6 +34,9 @@ from .fields import YTDataContainerFieldInfo, YTGridFieldInfo +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + _grid_data_containers = ["arbitrary_grid", "covering_grid", "smoothed_covering_grid"] _set_attrs = {"periodicity": "_periodicity"} @@ -146,7 +150,7 @@ def _set_code_unit_attributes(self): ) cgs_units = ("cm", "g", "s", "cm/s", "gauss") base_units = np.ones(len(attrs)) - for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units): + for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units, strict=True): if attr in self.parameters and isinstance( self.parameters[attr], YTQuantity ): diff --git a/yt/funcs.py b/yt/funcs.py index 94547af091..b6d10b2755 100644 --- a/yt/funcs.py +++ b/yt/funcs.py @@ -1,5 +1,4 @@ import base64 -import builtins import contextlib import copy import errno @@ -24,6 +23,7 @@ from more_itertools import always_iterable, collapse, first from yt._maintenance.deprecation import issue_deprecation_warning +from yt._maintenance.ipython_compat import IS_IPYTHON from yt.config import ytcfg from yt.units import YTArray, YTQuantity from yt.utilities.exceptions import YTFieldNotFound, YTInvalidWidthError @@ -515,7 +515,7 @@ def update_git(path): def rebuild_modules(path, f): f.write("Rebuilding modules\n\n") p = subprocess.Popen( - [sys.executable, "setup.py", "build_ext", "-i"], + [sys.executable, "setup.py", "build_clib", "build_ext", "-i"], cwd=path, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, @@ -1023,7 +1023,7 @@ def toggle_interactivity(): global interactivity interactivity = not interactivity if interactivity: - if "__IPYTHON__" in dir(builtins): + if IS_IPYTHON: import IPython shell = IPython.get_ipython() @@ -1086,19 +1086,27 @@ def array_like_field(data, x, field): return data.ds.quan(x, units) +def _full_type_name(obj: object = None, /, *, cls: Optional[type] = None) -> str: + if cls is not None and obj is not None: + raise TypeError("_full_type_name takes an object or a class, but not both") + if cls is None: + cls = obj.__class__ + prefix = f"{cls.__module__}." if cls.__module__ != "builtins" else "" + return f"{prefix}{cls.__name__}" + + def validate_3d_array(obj): if not is_sequence(obj) or len(obj) != 3: raise TypeError( - "Expected an array of size (3,), received '{}' of length {}".format( - str(type(obj)).split("'")[1], len(obj) - ) + f"Expected an array of size (3,), " + f"received {_full_type_name(obj)!r} of length {len(obj)}" ) def validate_float(obj): """Validates if the passed argument is a float value. - Raises an exception if `obj` is a single float value + Raises an exception if `obj` is not a single float value or a YTQuantity of size 1. Parameters @@ -1135,23 +1143,21 @@ def validate_float(obj): ): raise TypeError( "Expected a numeric value (or tuple of format " - "(float, String)), received an inconsistent tuple " - "'%s'." % str(obj) + f"(float, String)), received an inconsistent tuple {str(obj)!r}." 
) else: return if is_sequence(obj) and (len(obj) != 1 or not isinstance(obj[0], numeric_type)): raise TypeError( "Expected a numeric value (or size-1 array), " - "received '{}' of length {}".format(str(type(obj)).split("'")[1], len(obj)) + f"received {_full_type_name(obj)!r} of length {len(obj)}" ) def validate_sequence(obj): if obj is not None and not is_sequence(obj): raise TypeError( - "Expected an iterable object, " - "received '%s'" % str(type(obj)).split("'")[1] + "Expected an iterable object, " f"received {_full_type_name(obj)!r}" ) @@ -1181,9 +1187,8 @@ def is_valid_field_key(key): def validate_object(obj, data_type): if obj is not None and not isinstance(obj, data_type): raise TypeError( - "Expected an object of '{}' type, received '{}'".format( - str(data_type).split("'")[1], str(type(obj)).split("'")[1] - ) + f"Expected an object of {_full_type_name(cls=data_type)!r} type, " + f"received {_full_type_name(obj)!r}" ) @@ -1209,13 +1214,13 @@ def validate_center(center): raise TypeError( "Expected 'center' to be in ['c', 'center', " "'m', 'max', 'min'] or the prefix to be " - "'max_'/'min_', received '%s'." % center + f"'max_'/'min_', received {center!r}." ) elif not isinstance(center, (numeric_type, YTQuantity)) and not is_sequence(center): raise TypeError( "Expected 'center' to be a numeric object of type " "list/tuple/np.ndarray/YTArray/YTQuantity, " - "received '%s'." % str(type(center)).split("'")[1] + f"received {_full_type_name(center)}." ) diff --git a/yt/geometry/coordinates/cartesian_coordinates.py b/yt/geometry/coordinates/cartesian_coordinates.py index 95812c4d91..4fed1977c4 100644 --- a/yt/geometry/coordinates/cartesian_coordinates.py +++ b/yt/geometry/coordinates/cartesian_coordinates.py @@ -12,6 +12,7 @@ pixelize_element_mesh, pixelize_element_mesh_line, pixelize_off_axis_cartesian, + pixelize_sph_kernel_cutting, pixelize_sph_kernel_projection, pixelize_sph_kernel_slice, ) @@ -323,11 +324,20 @@ def _ortho_pixelize( # We should be using fcoords field = data_source._determine_fields(field)[0] finfo = data_source.ds.field_info[field] - period = self.period[:2].copy() # dummy here - period[0] = self.period[self.x_axis[dim]] - period[1] = self.period[self.y_axis[dim]] - if hasattr(period, "in_units"): - period = period.in_units("code_length").d + # some coordinate handlers use only projection-plane periods, + # others need all box periods. 
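For orientation, the period2/period3 split introduced in the lines just below can be summarized in a few lines of plain numpy. This is an illustrative sketch only (hard-coded axis indices for a view down the z axis; not the coordinate-handler API): period2 carries only the two image-plane periods, while period3 appends the period of the remaining depth axis, which the SPH routines need.

import numpy as np

period = np.array([3.0, 3.0, 3.0])  # box period along x, y, z (code_length)
xa, ya = 0, 1                       # image-plane axes for a view down the z axis
za = ({0, 1, 2} - {xa, ya}).pop()   # the remaining (depth) axis
period2 = period[[xa, ya]]          # in-plane periods: enough for grid data
period3 = period[[xa, ya, za]]      # all three periods: needed by the SPH kernels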
+ period2 = self.period[:2].copy() # dummy here + period2[0] = self.period[self.x_axis[dim]] + period2[1] = self.period[self.y_axis[dim]] + period3 = self.period[:].copy() # dummy here + period3[0] = self.period[self.x_axis[dim]] + period3[1] = self.period[self.y_axis[dim]] + zax = list({0, 1, 2} - {self.x_axis[dim], self.y_axis[dim]})[0] + period3[2] = self.period[zax] + if hasattr(period2, "in_units"): + period2 = period2.in_units("code_length").d + if hasattr(period3, "in_units"): + period3 = period3.in_units("code_length").d buff = np.full((size[1], size[0]), np.nan, dtype="float64") particle_datasets = (ParticleDataset, StreamParticlesDataset) @@ -349,26 +359,49 @@ def _ortho_pixelize( coord, bounds, int(antialias), - period, + period2, int(periodic), return_mask=True, ) elif isinstance(data_source.ds, particle_datasets) and is_sph_field: + # SPH handling ptype = field[0] if ptype == "gas": ptype = data_source.ds._sph_ptypes[0] px_name = self.axis_name[self.x_axis[dim]] py_name = self.axis_name[self.y_axis[dim]] + # need z coordinates for depth, + # but name isn't saved in the handler -> use the 'other one' + pz_name = list(set(self.axis_order) - {px_name, py_name})[0] + + # ignore default True periodic argument + # (not actually supplied by a call from + # FixedResolutionBuffer), and use the dataset periodicity + # instead + xa = self.x_axis[dim] + ya = self.y_axis[dim] + # axorder = data_source.ds.coordinates.axis_order + za = list({0, 1, 2} - {xa, ya})[0] + ds_periodic = data_source.ds.periodicity + _periodic = np.array(ds_periodic) + _periodic[0] = ds_periodic[xa] + _periodic[1] = ds_periodic[ya] + _periodic[2] = ds_periodic[za] ounits = data_source.ds.field_info[field].output_units bnds = data_source.ds.arr(bounds, "code_length").tolist() - if isinstance(data_source, YTParticleProj): + kernel_name = None + if hasattr(data_source.ds, "kernel_name"): + kernel_name = data_source.ds.kernel_name + if kernel_name is None: + kernel_name = "cubic" + + if isinstance(data_source, YTParticleProj): # projection weight = data_source.weight_field moment = data_source.moment le, re = data_source.data_source.get_bbox() - xa = self.x_axis[dim] - ya = self.y_axis[dim] # If we're not periodic, we need to clip to the boundary edges # or we get errors about extending off the edge of the region. 
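The clipping described in the comment above amounts to shrinking the selection region to the domain along any non-periodic axis; a minimal sketch with made-up bounds (hypothetical numbers, not the actual yt call site):

import numpy as np

domain_le = np.zeros(3)          # domain spans [0, 3] in code_length
domain_re = np.full(3, 3.0)
le = np.array([-0.5, 0.0, 0.0])  # requested region, partly outside the domain
re = np.array([2.5, 3.0, 3.0])
xa, periodic_x = 0, False
if not periodic_x:               # no wrapping: clamp to the domain edges
    le[xa] = max(le[xa], domain_le[xa])
    re[xa] = min(re[xa], domain_re[xa])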
+ # (depth/z range is handled by region setting) if not self.ds.periodicity[xa]: le[xa] = max(bounds[0], self.ds.domain_left_edge[xa]) re[xa] = min(bounds[1], self.ds.domain_right_edge[xa]) @@ -389,6 +422,10 @@ def _ortho_pixelize( data_source=data_source.data_source, ) proj_reg.set_field_parameter("axis", data_source.axis) + # need some z bounds for SPH projection + # -> use source bounds + bnds3 = bnds + [le[za], re[za]] + buff = np.zeros(size, dtype="float64") mask_uint8 = np.zeros_like(buff, dtype="uint8") if weight is None: @@ -399,13 +436,15 @@ def _ortho_pixelize( mask_uint8, chunk[ptype, px_name].to("code_length"), chunk[ptype, py_name].to("code_length"), + chunk[ptype, pz_name].to("code_length"), chunk[ptype, "smoothing_length"].to("code_length"), chunk[ptype, "mass"].to("code_mass"), chunk[ptype, "density"].to("code_density"), chunk[field].in_units(ounits), - bnds, - check_period=int(periodic), - period=period, + bnds3, + _check_period=_periodic.astype("int"), + period=period3, + kernel_name=kernel_name, ) # We use code length here, but to get the path length right # we need to multiply by the conversion factor between @@ -430,14 +469,16 @@ def _ortho_pixelize( mask_uint8, chunk[ptype, px_name].to("code_length"), chunk[ptype, py_name].to("code_length"), + chunk[ptype, pz_name].to("code_length"), chunk[ptype, "smoothing_length"].to("code_length"), chunk[ptype, "mass"].to("code_mass"), chunk[ptype, "density"].to("code_density"), chunk[field].in_units(ounits), - bnds, - check_period=int(periodic), - period=period, + bnds3, + _check_period=_periodic.astype("int"), + period=period3, weight_field=chunk[weight].in_units(wounits), + kernel_name=kernel_name, ) mylog.info( "Making a fixed resolution buffer of (%s) %d by %d", @@ -452,13 +493,15 @@ def _ortho_pixelize( mask_uint8, chunk[ptype, px_name].to("code_length"), chunk[ptype, py_name].to("code_length"), + chunk[ptype, pz_name].to("code_length"), chunk[ptype, "smoothing_length"].to("code_length"), chunk[ptype, "mass"].to("code_mass"), chunk[ptype, "density"].to("code_density"), chunk[weight].in_units(wounits), - bnds, - check_period=int(periodic), - period=period, + bnds3, + _check_period=_periodic.astype("int"), + period=period3, + kernel_name=kernel_name, ) normalization_2d_utility(buff, weight_buff) if moment == 2: @@ -471,14 +514,16 @@ def _ortho_pixelize( mask_uint8, chunk[ptype, px_name].to("code_length"), chunk[ptype, py_name].to("code_length"), + chunk[ptype, pz_name].to("code_length"), chunk[ptype, "smoothing_length"].to("code_length"), chunk[ptype, "mass"].to("code_mass"), chunk[ptype, "density"].to("code_density"), chunk[field].in_units(ounits) ** 2, - bnds, - check_period=int(periodic), - period=period, + bnds3, + _check_period=_periodic.astype("int"), + period=period3, weight_field=chunk[weight].in_units(wounits), + kernel_name=kernel_name, ) normalization_2d_utility(buff2, weight_buff) buff = compute_stddev_image(buff2, buff) @@ -494,32 +539,39 @@ def _ortho_pixelize( buff_den = np.zeros(size, dtype="float64") for chunk in data_source.chunks([], "io"): + hsmlname = "smoothing_length" pixelize_sph_kernel_slice( buff, mask_uint8, - chunk[ptype, px_name].to("code_length"), - chunk[ptype, py_name].to("code_length"), - chunk[ptype, "smoothing_length"].to("code_length"), - chunk[ptype, "mass"].to("code_mass"), - chunk[ptype, "density"].to("code_density"), - chunk[field].in_units(ounits), + chunk[ptype, px_name].to("code_length").v, + chunk[ptype, py_name].to("code_length").v, + chunk[ptype, pz_name].to("code_length").v, + 
chunk[ptype, hsmlname].to("code_length").v, + chunk[ptype, "mass"].to("code_mass").v, + chunk[ptype, "density"].to("code_density").v, + chunk[field].in_units(ounits).v, bnds, - check_period=int(periodic), - period=period, + data_source.coord.to("code_length").v, + _check_period=_periodic.astype("int"), + period=period3, + kernel_name=kernel_name, ) if normalize: pixelize_sph_kernel_slice( buff_den, mask_uint8, - chunk[ptype, px_name].to("code_length"), - chunk[ptype, py_name].to("code_length"), - chunk[ptype, "smoothing_length"].to("code_length"), - chunk[ptype, "mass"].to("code_mass"), - chunk[ptype, "density"].to("code_density"), + chunk[ptype, px_name].to("code_length").v, + chunk[ptype, py_name].to("code_length").v, + chunk[ptype, pz_name].to("code_length").v, + chunk[ptype, hsmlname].to("code_length").v, + chunk[ptype, "mass"].to("code_mass").v, + chunk[ptype, "density"].to("code_density").v, np.ones(chunk[ptype, "density"].shape[0]), bnds, - check_period=int(periodic), - period=period, + data_source.coord.to("code_length").v, + _check_period=_periodic.astype("int"), + period=period3, + kernel_name=kernel_name, ) if normalize: @@ -593,8 +645,8 @@ def _ortho_pixelize( mask = mask.transpose() else: raise NotImplementedError( - "A pixelization routine has not been implemented for %s " - "data objects" % str(type(data_source)) + "A pixelization routine has not been implemented for " + f"{type(data_source)} data objects" ) buff = buff.transpose() mask = mask.transpose() @@ -608,7 +660,7 @@ def _ortho_pixelize( data_source[field], bounds, int(antialias), - period, + period2, int(periodic), return_mask=True, ) @@ -616,29 +668,130 @@ def _ortho_pixelize( return buff, mask def _oblique_pixelize(self, data_source, field, bounds, size, antialias): + from yt.data_objects.selection_objects.slices import YTCuttingPlane + from yt.frontends.sph.data_structures import ParticleDataset + from yt.frontends.stream.data_structures import StreamParticlesDataset from yt.frontends.ytdata.data_structures import YTSpatialPlotDataset - indices = np.argsort(data_source["pdx"])[::-1].astype("int64", copy=False) - buff = np.full((size[1], size[0]), np.nan, dtype="float64") - ftype = "index" - if isinstance(data_source.ds, YTSpatialPlotDataset): - ftype = "gas" - mask = pixelize_off_axis_cartesian( - buff, - data_source[ftype, "x"], - data_source[ftype, "y"], - data_source[ftype, "z"], - data_source["px"], - data_source["py"], - data_source["pdx"], - data_source["pdy"], - data_source["pdz"], - data_source.center, - data_source._inv_mat, - indices, - data_source[field], - bounds, - ) + # Determine what sort of data we're dealing with + # -> what backend to use + # copied from the _ortho_pixelize method + field = data_source._determine_fields(field)[0] + _finfo = data_source.ds.field_info[field] + is_sph_field = _finfo.is_sph_field + particle_datasets = (ParticleDataset, StreamParticlesDataset) + # finfo = self.ds._get_field_info(field) + + # SPH data + # only for slices: a function in off_axis_projection.py + # handles projections + if ( + isinstance(data_source.ds, particle_datasets) + and is_sph_field + and isinstance(data_source, YTCuttingPlane) + ): + normalize = getattr(self.ds, "use_sph_normalization", True) + le = data_source.ds.domain_left_edge.to("code_length") + re = data_source.ds.domain_right_edge.to("code_length") + boxbounds = np.array([le[0], re[0], le[1], re[1], le[2], re[2]]) + periodic = data_source.ds.periodicity + ptype = field[0] + if ptype == "gas": + ptype = data_source.ds._sph_ptypes[0] + 
axorder = data_source.ds.coordinates.axis_order + ounits = data_source.ds.field_info[field].output_units + # input bounds are in code length units already + widthxy = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2])) + kernel_name = None + if hasattr(data_source.ds, "kernel_name"): + kernel_name = data_source.ds.kernel_name + if kernel_name is None: + kernel_name = "cubic" + # data_source should be a YTCuttingPlane object + # dimensionless unyt normal/north + # -> numpy array cython can deal with + normal_vector = data_source.normal.v + north_vector = data_source._y_vec.v + center = data_source.center.to("code_length") + + buff = np.zeros(size, dtype="float64") + mask_uint8 = np.zeros_like(buff, dtype="uint8") + if normalize: + buff_den = np.zeros(size, dtype="float64") + + for chunk in data_source.chunks([], "io"): + pixelize_sph_kernel_cutting( + buff, + mask_uint8, + chunk[ptype, axorder[0]].to("code_length").v, + chunk[ptype, axorder[1]].to("code_length").v, + chunk[ptype, axorder[2]].to("code_length").v, + chunk[ptype, "smoothing_length"].to("code_length").v, + chunk[ptype, "mass"].to("code_mass"), + chunk[ptype, "density"].to("code_density"), + chunk[field].in_units(ounits), + center, + widthxy, + normal_vector, + north_vector, + boxbounds, + periodic, + kernel_name=kernel_name, + check_period=1, + ) + if normalize: + pixelize_sph_kernel_cutting( + buff_den, + mask_uint8, + chunk[ptype, axorder[0]].to("code_length"), + chunk[ptype, axorder[1]].to("code_length"), + chunk[ptype, axorder[2]].to("code_length"), + chunk[ptype, "smoothing_length"].to("code_length"), + chunk[ptype, "mass"].to("code_mass"), + chunk[ptype, "density"].to("code_density"), + np.ones(chunk[ptype, "density"].shape[0]), + center, + widthxy, + normal_vector, + north_vector, + boxbounds, + periodic, + kernel_name=kernel_name, + check_period=1, + ) + + if normalize: + normalization_2d_utility(buff, buff_den) + + mask = mask_uint8.astype("bool", copy=False) + # swap axes for image plotting + mask = mask.swapaxes(0, 1) + buff = buff.swapaxes(0, 1) + + # whatever other data this code could handle before the + # SPH option was added + else: + indices = np.argsort(data_source["pdx"])[::-1].astype("int64", copy=False) + buff = np.full((size[1], size[0]), np.nan, dtype="float64") + ftype = "index" + if isinstance(data_source.ds, YTSpatialPlotDataset): + ftype = "gas" + mask = pixelize_off_axis_cartesian( + buff, + data_source[ftype, "x"], + data_source[ftype, "y"], + data_source[ftype, "z"], + data_source["px"], + data_source["py"], + data_source["pdx"], + data_source["pdy"], + data_source["pdz"], + data_source.center, + data_source._inv_mat, + indices, + data_source[field], + bounds, + ) return buff, mask def convert_from_cartesian(self, coord): diff --git a/yt/geometry/coordinates/tests/test_sph_pixelization.py b/yt/geometry/coordinates/tests/test_sph_pixelization.py index 6de8b29f77..1d1c82be5c 100644 --- a/yt/geometry/coordinates/tests/test_sph_pixelization.py +++ b/yt/geometry/coordinates/tests/test_sph_pixelization.py @@ -1,7 +1,19 @@ +import numpy as np + import yt -from yt.testing import assert_rel_equal, requires_file +from yt.testing import ( + assert_rel_equal, + cubicspline_python, + fake_sph_flexible_grid_ds, + integrate_kernel, + requires_file, +) from yt.utilities.math_utils import compute_stddev_image +## off-axis projection tests for SPH data are in +## yt/visualization/tests/test_offaxisprojection.py + + magneticum = "MagneticumCluster/snap_132" mag_kwargs = { @@ -36,3 +48,172 @@ def _vysq(field, 
data): ) sigy = compute_stddev_image(prj1.frb["gas", "vysq"], prj1.frb["gas", "velocity_y"]) assert_rel_equal(sigy, prj2.frb["gas", "velocity_y"].d, 10) + + +def test_sph_projection_basic1(): + """ + small, uniform grid: expected values for given dl? + pixel centers at 0.5, 1., 1.5, 2., 2.5 + particles at 0.5, 1.5, 2.5 + """ + bbox = np.array([[0.0, 3.0]] * 3) + ds = fake_sph_flexible_grid_ds(hsml_factor=1.0, nperside=3, bbox=bbox) + # works, but no depth control (at least without specific filters) + proj = ds.proj(("gas", "density"), 2) + frb = proj.to_frb( + width=(2.5, "cm"), + resolution=(5, 5), + height=(2.5, "cm"), + center=np.array([1.5, 1.5, 1.5]), + periodic=False, + ) + out = frb.get_image(("gas", "density")) + + expected_out = np.zeros((5, 5), dtype=np.float64) + dl_1part = integrate_kernel(cubicspline_python, 0.0, 0.5) + linedens_1part = dl_1part * 1.0 # unit mass, density + linedens = 3.0 * linedens_1part + expected_out[::2, ::2] = linedens + + assert_rel_equal(expected_out, out.v, 5) + # return out + + +def test_sph_projection_basic2(): + """ + small, uniform grid: expected values for given dl? + pixel centers at 0.5, 1., 1.5, 2., 2.5 + particles at 0.5, 1.5, 2.5 + but hsml radii are 0.25 -> try non-zero impact parameters, + other pixels are still zero. + """ + bbox = np.array([[0.0, 3.0]] * 3) + ds = fake_sph_flexible_grid_ds(hsml_factor=0.5, nperside=3, bbox=bbox) + proj = ds.proj(("gas", "density"), 2) + frb = proj.to_frb( + width=(2.5, "cm"), + resolution=(5, 5), + height=(2.5, "cm"), + center=np.array([1.375, 1.375, 1.5]), + periodic=False, + ) + out = frb.get_image(("gas", "density")) + + expected_out = np.zeros((5, 5), dtype=np.float64) + dl_1part = integrate_kernel(cubicspline_python, np.sqrt(2) * 0.125, 0.25) + linedens_1part = dl_1part * 1.0 # unit mass, density + linedens = 3.0 * linedens_1part + expected_out[::2, ::2] = linedens + + # print(expected_out) + # print(out.v) + assert_rel_equal(expected_out, out.v, 4) + # return out + + +def get_dataset_sphrefine(reflevel: int = 1): + """ + constant density particle grid, + with increasing particle sampling + """ + lenfact = (1.0 / 3.0) ** (reflevel - 1) + massfact = lenfact**3 + nperside = 3**reflevel + + e1hat = np.array([lenfact, 0, 0]) + e2hat = np.array([0, lenfact, 0]) + e3hat = np.array([0, 0, lenfact]) + hsml_factor = lenfact + bbox = np.array([[0.0, 3.0]] * 3) + offsets = np.ones(3, dtype=np.float64) * 0.5 # in units of ehat + + def refmass(i: int, j: int, k: int) -> float: + return massfact + + unitrho = 1.0 / massfact # want density 1 for decreasing mass + + ds = fake_sph_flexible_grid_ds( + hsml_factor=hsml_factor, + nperside=nperside, + periodic=True, + e1hat=e1hat, + e2hat=e2hat, + e3hat=e3hat, + offsets=offsets, + massgenerator=refmass, + unitrho=unitrho, + bbox=bbox, + ) + return ds + + +def getdata_test_gridproj2(): + # initial pixel centers at 0.5, 1., 1.5, 2., 2.5 + # particles at 0.5, 1.5, 2.5 + # refine particle grid, check if pixel values remain the + # same in the pixels passing through initial particle centers + outlist = [] + dss = [] + for rl in range(1, 4): + ds = get_dataset_sphrefine(reflevel=rl) + proj = ds.proj(("gas", "density"), 2) + frb = proj.to_frb( + width=(2.5, "cm"), + resolution=(5, 5), + height=(2.5, "cm"), + center=np.array([1.5, 1.5, 1.5]), + periodic=False, + ) + out = frb.get_image(("gas", "density")) + outlist.append(out) + dss.append(ds) + return outlist, dss + + +def test_sph_gridproj_reseffect1(): + """ + Comparing same pixel centers with higher particle resolution. 
+ The pixel centers are at x/y coordinates [0.5, 1., 1.5, 2., 2.5] + at the first level, the spacing halves at each level. + Checking the pixels at [0.5, 1.5, 2.5], + which should have the same values at each resolution. + """ + imgs, _ = getdata_test_gridproj2() + ref = imgs[-1] + for img in imgs: + assert_rel_equal( + img[:: img.shape[0] // 2, :: img.shape[1] // 2], + ref[:: ref.shape[0] // 2, :: ref.shape[1] // 2], + 4, + ) + + +def test_sph_gridproj_reseffect2(): + """ + refine the pixel grid instead of the particle grid + """ + ds = get_dataset_sphrefine(reflevel=2) + proj = ds.proj(("gas", "density"), 2) + imgs = {} + maxrl = 5 + for rl in range(1, maxrl + 1): + npix = 1 + 2 ** (rl + 1) + margin = 0.5 - 0.5 ** (rl + 1) + frb = proj.to_frb( + width=(3.0 - 2.0 * margin, "cm"), + resolution=(npix, npix), + height=(3.0 - 2.0 * margin, "cm"), + center=np.array([1.5, 1.5, 1.5]), + periodic=False, + ) + out = frb.get_image(("gas", "density")) + imgs[rl] = out + ref = imgs[maxrl] + pixspace_ref = 2 ** (maxrl) + for rl in imgs: + img = imgs[rl] + pixspace = 2 ** (rl) + # print(f'Grid refinement level {rl}:') + assert_rel_equal( + img[::pixspace, ::pixspace], ref[::pixspace_ref, ::pixspace_ref], 4 + ) diff --git a/yt/geometry/coordinates/tests/test_sph_pixelization_pytestonly.py b/yt/geometry/coordinates/tests/test_sph_pixelization_pytestonly.py new file mode 100644 index 0000000000..75c7175e06 --- /dev/null +++ b/yt/geometry/coordinates/tests/test_sph_pixelization_pytestonly.py @@ -0,0 +1,547 @@ +from typing import Union + +import numpy as np +import pytest +import unyt + +import yt +from yt.data_objects.selection_objects.region import YTRegion +from yt.testing import ( + assert_rel_equal, + cubicspline_python, + distancematrix, + fake_random_sph_ds, + fake_sph_flexible_grid_ds, + integrate_kernel, +) + + +@pytest.mark.parametrize("weighted", [True, False]) +@pytest.mark.parametrize("periodic", [True, False]) +@pytest.mark.parametrize("depth", [None, (1.0, "cm")]) +@pytest.mark.parametrize("shiftcenter", [False, True]) +@pytest.mark.parametrize("axis", [0, 1, 2]) +def test_sph_proj_general_alongaxes( + axis: int, + shiftcenter: bool, + depth: Union[float, None], + periodic: bool, + weighted: bool, +) -> None: + """ + The previous projection tests were for a specific issue. + Here, we test more functionality of the projections. + We just send lines of sight through pixel centers for convenience. + Particles at [0.5, 1.5, 2.5] (in each coordinate) + smoothing lengths 0.25 + all particles have mass 1., density 1.5, + except the single center particle, with mass 2., density 3. + + Parameters: + ----------- + axis: {0, 1, 2} + projection axis (aligned with sim. axis) + shiftcenter: bool + shift the coordinates to center the projection on. + (The grid is offset to this same center) + depth: float or None + depth of the projection slice + periodic: bool + assume periodic boundary conditions, or not + weighted: bool + make a weighted projection (density-weighted density), or not + + Returns: + -------- + None + """ + if shiftcenter: + center = unyt.unyt_array(np.array((0.625, 0.625, 0.625)), "cm") + else: + center = unyt.unyt_array(np.array((1.5, 1.5, 1.5)), "cm") + bbox = unyt.unyt_array(np.array([[0.0, 3.0], [0.0, 3.0], [0.0, 3.0]]), "cm") + hsml_factor = 0.5 + unitrho = 1.5 + + # test correct centering, particle selection + def makemasses(i, j, k): + if i == j == k == 1: + return 2.0 + else: + return 1.0 + + # m / rho, factor 1. 
/ hsml**2 is included in the kernel integral
+    # (density is adjusted, so same for center particle)
+    prefactor = 1.0 / unitrho  # / (0.5 * 0.5)**2
+    dl_cen = integrate_kernel(cubicspline_python, 0.0, 0.25)
+
+    # result shouldn't depend explicitly on the center if we re-center
+    # the data, unless we get cut-offs in the non-periodic case
+    ds = fake_sph_flexible_grid_ds(
+        hsml_factor=hsml_factor,
+        nperside=3,
+        periodic=periodic,
+        offsets=np.full(3, 0.5),
+        massgenerator=makemasses,
+        unitrho=unitrho,
+        bbox=bbox.v,
+        recenter=center.v,
+    )
+    if depth is None:
+        source = ds.all_data()
+    else:
+        depth = unyt.unyt_quantity(*depth)
+        le = np.array(ds.domain_left_edge)
+        re = np.array(ds.domain_right_edge)
+        le[axis] = center[axis] - 0.5 * depth
+        re[axis] = center[axis] + 0.5 * depth
+        cen = 0.5 * (le + re)
+        reg = YTRegion(center=cen, left_edge=le, right_edge=re, ds=ds)
+        source = reg
+
+    # we don't actually want a plot, it's just a straightforward,
+    # common way to get an frb / image array
+    if weighted:
+        toweight_field = ("gas", "density")
+    else:
+        toweight_field = None
+    prj = yt.ProjectionPlot(
+        ds,
+        axis,
+        ("gas", "density"),
+        width=(2.5, "cm"),
+        weight_field=toweight_field,
+        buff_size=(5, 5),
+        center=center,
+        data_source=source,
+    )
+    img = prj.frb.data[("gas", "density")]
+    if weighted:
+        expected_out = np.zeros(
+            (
+                5,
+                5,
+            ),
+            dtype=img.v.dtype,
+        )
+        expected_out[::2, ::2] = unitrho
+        if depth is None:
+            ## during shift, particle coords do wrap around edges
+            # if (not periodic) and shiftcenter:
+            #     # weight 1. for unitrho, 2. for 2. * unitrho
+            #     expected_out[2, 2] *= 5. / 3.
+            # else:
+            # weight (2 * 1.) for unitrho, (1 * 2.) for 2. * unitrho
+            expected_out[2, 2] *= 1.5
+        else:
+            # only 2 * unitrho element included
+            expected_out[2, 2] *= 2.0
+    else:
+        expected_out = np.zeros(
+            (
+                5,
+                5,
+            ),
+            dtype=img.v.dtype,
+        )
+        expected_out[::2, ::2] = dl_cen * prefactor * unitrho
+        if depth is None:
+            # 3 particles per l.o.s., including the denser one
+            expected_out *= 3.0
+            expected_out[2, 2] *= 4.0 / 3.0
+        else:
+            # 1 particle per l.o.s., including the denser one
+            expected_out[2, 2] *= 2.0
+    # grid is shifted to the left -> 'missing' stuff at the left
+    if (not periodic) and shiftcenter:
+        expected_out[:1, :] = 0.0
+        expected_out[:, :1] = 0.0
+    # print(axis, shiftcenter, depth, periodic, weighted)
+    # print(expected_out)
+    # print(img.v)
+    assert_rel_equal(expected_out, img.v, 5)
+
+
+@pytest.mark.parametrize("periodic", [True, False])
+@pytest.mark.parametrize("shiftcenter", [False, True])
+@pytest.mark.parametrize("zoff", [0.0, 0.1, 0.5, 1.0])
+@pytest.mark.parametrize("axis", [0, 1, 2])
+def test_sph_slice_general_alongaxes(
+    axis: int,
+    shiftcenter: bool,
+    periodic: bool,
+    zoff: float,
+) -> None:
+    """
+    Particles at [0.5, 1.5, 2.5] (in each coordinate)
+    smoothing lengths 0.25
+    all particles have mass 1., density 1.5, except the center
+    particle (mass 2., density 3.) and the particle at
+    [2.5, 2.5, 2.5] (mass 3., density 4.5)
+
+    Parameters:
+    -----------
+    axis: {0, 1, 2}
+        projection axis (aligned with sim. axis)
+    shiftcenter: bool
+        shift the coordinates to center the projection on.
+ (The grid is offset to this same center) + zoff: float + offset of the slice plane from the SPH particle center plane + periodic: bool + assume periodic boundary conditions, or not + + Returns: + -------- + None + """ + if shiftcenter: + center = unyt.unyt_array(np.array((0.625, 0.625, 0.625)), "cm") + else: + center = unyt.unyt_array(np.array((1.5, 1.5, 1.5)), "cm") + bbox = unyt.unyt_array(np.array([[0.0, 3.0], [0.0, 3.0], [0.0, 3.0]]), "cm") + hsml_factor = 0.5 + unitrho = 1.5 + + # test correct centering, particle selection + def makemasses(i, j, k): + if i == j == k == 1: + return 2.0 + elif i == j == k == 2: + return 3.0 + else: + return 1.0 + + # result shouldn't depend explicitly on the center if we re-center + # the data, unless we get cut-offs in the non-periodic case + ds = fake_sph_flexible_grid_ds( + hsml_factor=hsml_factor, + nperside=3, + periodic=periodic, + offsets=np.full(3, 0.5), + massgenerator=makemasses, + unitrho=unitrho, + bbox=bbox.v, + recenter=center.v, + ) + ad = ds.all_data() + # print(ad[('gas', 'position')]) + outgridsize = 10 + width = 2.5 + _center = center.to("cm").v.copy() + _center[axis] += zoff + + # we don't actually want a plot, it's just a straightforward, + # common way to get an frb / image array + slc = yt.SlicePlot( + ds, + axis, + ("gas", "density"), + width=(width, "cm"), + buff_size=(outgridsize,) * 2, + center=(_center, "cm"), + ) + img = slc.frb.data[("gas", "density")] + + # center is same in non-projection coords + if axis == 0: + ci = 1 + else: + ci = 0 + gridcens = ( + _center[ci] + - 0.5 * width + + 0.5 * width / outgridsize + + np.arange(outgridsize) * width / outgridsize + ) + xgrid = np.repeat(gridcens, outgridsize) + ygrid = np.tile(gridcens, outgridsize) + zgrid = np.full(outgridsize**2, _center[axis]) + gridcoords = np.empty((outgridsize**2, 3), dtype=xgrid.dtype) + if axis == 2: + gridcoords[:, 0] = xgrid + gridcoords[:, 1] = ygrid + gridcoords[:, 2] = zgrid + elif axis == 0: + gridcoords[:, 0] = zgrid + gridcoords[:, 1] = xgrid + gridcoords[:, 2] = ygrid + elif axis == 1: + gridcoords[:, 0] = ygrid + gridcoords[:, 1] = zgrid + gridcoords[:, 2] = xgrid + ad = ds.all_data() + sphcoords = np.array( + [ + (ad[("gas", "x")]).to("cm"), + (ad[("gas", "y")]).to("cm"), + (ad[("gas", "z")]).to("cm"), + ] + ).T + # print("sphcoords:") + # print(sphcoords) + # print("gridcoords:") + # print(gridcoords) + dists = distancematrix( + gridcoords, + sphcoords, + periodic=(periodic,) * 3, + periods=np.array([3.0, 3.0, 3.0]), + ) + # print("dists <= 1:") + # print(dists <= 1) + sml = (ad[("gas", "smoothing_length")]).to("cm") + normkern = cubicspline_python(dists / sml.v[np.newaxis, :]) + sphcontr = normkern / sml[np.newaxis, :] ** 3 * ad[("gas", "mass")] + contsum = np.sum(sphcontr, axis=1) + sphweights = ( + normkern + / sml[np.newaxis, :] ** 3 + * ad[("gas", "mass")] + / ad[("gas", "density")] + ) + weights = np.sum(sphweights, axis=1) + nzeromask = np.logical_not(weights == 0) + expected = np.zeros(weights.shape, weights.dtype) + expected[nzeromask] = contsum[nzeromask] / weights[nzeromask] + expected = expected.reshape((outgridsize, outgridsize)) + # expected[np.isnan(expected)] = 0.0 # convention in the slices + + # print("expected:\n", expected) + # print("recovered:\n", img.v) + assert_rel_equal(expected, img.v, 5) + + +@pytest.mark.parametrize("periodic", [True, False]) +@pytest.mark.parametrize("shiftcenter", [False, True]) +@pytest.mark.parametrize("northvector", [None, (1.0e-4, 1.0, 0.0)]) +@pytest.mark.parametrize("zoff", [0.0, 0.1, 
0.5, 1.0])
+def test_sph_slice_general_offaxis(
+    northvector: Union[tuple[float, float, float], None],
+    shiftcenter: bool,
+    zoff: float,
+    periodic: bool,
+) -> None:
+    """
+    Same as the on-axis slices, but we rotate the basis vectors
+    to test whether rotations are handled correctly. The rotation is
+    chosen to be small so that in/exclusion of particles within
+    bboxes, etc. works out the same way.
+    Particles at [0.5, 1.5, 2.5] (in each coordinate)
+    smoothing lengths 0.25
+    all particles have mass 1., density 1.5,
+    except the single center particle, with mass 2., density 3.
+
+    Parameters:
+    -----------
+    northvector: tuple
+        y-axis direction in the final plot (direction vector)
+    shiftcenter: bool
+        shift the coordinates to center the projection on.
+        (The grid is offset to this same center)
+    zoff: float
+        offset of the slice plane from the SPH particle center plane
+    periodic: bool
+        assume periodic boundary conditions, or not
+
+    Returns:
+    --------
+    None
+    """
+    if shiftcenter:
+        center = np.array((0.625, 0.625, 0.625))  # cm
+    else:
+        center = np.array((1.5, 1.5, 1.5))  # cm
+    bbox = unyt.unyt_array(np.array([[0.0, 3.0], [0.0, 3.0], [0.0, 3.0]]), "cm")
+    hsml_factor = 0.5
+    unitrho = 1.5
+
+    # test correct centering, particle selection
+    def makemasses(i, j, k):
+        if i == j == k == 1:
+            return 2.0
+        else:
+            return 1.0
+
+    # try to make sure dl differences from periodic wrapping are small
+    epsilon = 1e-4
+    projaxis = np.array([epsilon, 0.00, np.sqrt(1.0 - epsilon**2)])
+    e1dir = projaxis / np.sqrt(np.sum(projaxis**2))
+    if northvector is None:
+        e2dir = np.array([0.0, 1.0, 0.0])
+    else:
+        e2dir = np.asarray(northvector)
+    e2dir = e2dir - np.sum(e1dir * e2dir) * e1dir  # orthogonalize w.r.t. e1dir
+    e2dir /= np.sqrt(np.sum(e2dir**2))
+    e3dir = np.cross(e2dir, e1dir)
+
+    outgridsize = 10
+    width = 2.5
+    _center = center.copy()
+    _center += zoff * e1dir
+
+    ds = fake_sph_flexible_grid_ds(
+        hsml_factor=hsml_factor,
+        nperside=3,
+        periodic=periodic,
+        offsets=np.full(3, 0.5),
+        massgenerator=makemasses,
+        unitrho=unitrho,
+        bbox=bbox.v,
+        recenter=center,
+        e1hat=e1dir,
+        e2hat=e2dir,
+        e3hat=e3dir,
+    )
+
+    # source = ds.all_data()
+    # couple to dataset -> right unit registry
+    center = ds.arr(center, "cm")
+    # print("position:\n", source["gas", "position"])
+    slc = yt.SlicePlot(
+        ds,
+        e1dir,
+        ("gas", "density"),
+        width=(width, "cm"),
+        buff_size=(outgridsize,) * 2,
+        center=(_center, "cm"),
+        north_vector=e2dir,
+    )
+    img = slc.frb.data[("gas", "density")]
+
+    # center is same in x/y (e3dir/e2dir)
+    gridcenx = (
+        np.dot(_center, e3dir)
+        - 0.5 * width
+        + 0.5 * width / outgridsize
+        + np.arange(outgridsize) * width / outgridsize
+    )
+    gridceny = (
+        np.dot(_center, e2dir)
+        - 0.5 * width
+        + 0.5 * width / outgridsize
+        + np.arange(outgridsize) * width / outgridsize
+    )
+    xgrid = np.repeat(gridcenx, outgridsize)
+    ygrid = np.tile(gridceny, outgridsize)
+    zgrid = np.full(outgridsize**2, np.dot(_center, e1dir))
+    gridcoords = (
+        xgrid[:, np.newaxis] * e3dir[np.newaxis, :]
+        + ygrid[:, np.newaxis] * e2dir[np.newaxis, :]
+        + zgrid[:, np.newaxis] * e1dir[np.newaxis, :]
+    )
+    # print("gridcoords:")
+    # print(gridcoords)
+    ad = ds.all_data()
+    sphcoords = np.array(
+        [
+            (ad[("gas", "x")]).to("cm"),
+            (ad[("gas", "y")]).to("cm"),
+            (ad[("gas", "z")]).to("cm"),
+        ]
+    ).T
+    dists = distancematrix(
+        gridcoords,
+        sphcoords,
+        periodic=(periodic,) * 3,
+        periods=np.array([3.0, 3.0, 3.0]),
+    )
+    sml = (ad[("gas", "smoothing_length")]).to("cm")
+    normkern = cubicspline_python(dists / sml.v[np.newaxis, :])
+
sphcontr = normkern / sml[np.newaxis, :] ** 3 * ad[("gas", "mass")] + contsum = np.sum(sphcontr, axis=1) + sphweights = ( + normkern + / sml[np.newaxis, :] ** 3 + * ad[("gas", "mass")] + / ad[("gas", "density")] + ) + weights = np.sum(sphweights, axis=1) + nzeromask = np.logical_not(weights == 0) + expected = np.zeros(weights.shape, weights.dtype) + expected[nzeromask] = contsum[nzeromask] / weights[nzeromask] + expected = expected.reshape((outgridsize, outgridsize)) + expected = expected.T # transposed for image plotting + # expected[np.isnan(expected)] = 0.0 # convention in the slices + + # print(axis, shiftcenter, depth, periodic, weighted) + # print("expected:\n", expected) + # print("recovered:\n", img.v) + assert_rel_equal(expected, img.v, 4) + + +# only axis-aligned; testing YTArbitraryGrid, YTCoveringGrid +@pytest.mark.parametrize("periodic", [True, False, (True, True, False)]) +@pytest.mark.parametrize("wholebox", [True, False]) +def test_sph_grid( + periodic: Union[bool, tuple[bool, bool, bool]], + wholebox: bool, +) -> None: + bbox = np.array([[-1.0, 3.0], [1.0, 5.2], [-1.0, 3.0]]) + ds = fake_random_sph_ds(50, bbox, periodic=periodic) + + if not hasattr(periodic, "__len__"): + periodic = (periodic,) * 3 + + if wholebox: + left = bbox[:, 0].copy() + level = 2 + ncells = np.array([2**level] * 3) + # print("left: ", left) + # print("ncells: ", ncells) + resgrid = ds.covering_grid(level, tuple(left), ncells) + right = bbox[:, 1].copy() + xedges = np.linspace(left[0], right[0], ncells[0] + 1) + yedges = np.linspace(left[1], right[1], ncells[1] + 1) + zedges = np.linspace(left[2], right[2], ncells[2] + 1) + else: + left = np.array([-1.0, 1.8, -1.0]) + right = np.array([2.5, 5.2, 2.5]) + ncells = np.array([3, 4, 4]) + resgrid = ds.arbitrary_grid(left, right, dims=ncells) + xedges = np.linspace(left[0], right[0], ncells[0] + 1) + yedges = np.linspace(left[1], right[1], ncells[1] + 1) + zedges = np.linspace(left[2], right[2], ncells[2] + 1) + res = resgrid["gas", "density"] + xcens = 0.5 * (xedges[:-1] + xedges[1:]) + ycens = 0.5 * (yedges[:-1] + yedges[1:]) + zcens = 0.5 * (zedges[:-1] + zedges[1:]) + + ad = ds.all_data() + sphcoords = np.array( + [ + (ad[("gas", "x")]).to("cm"), + (ad[("gas", "y")]).to("cm"), + (ad[("gas", "z")]).to("cm"), + ] + ).T + gridx, gridy, gridz = np.meshgrid(xcens, ycens, zcens, indexing="ij") + outshape = gridx.shape + gridx = gridx.flatten() + gridy = gridy.flatten() + gridz = gridz.flatten() + gridcoords = np.array([gridx, gridy, gridz]).T + periods = bbox[:, 1] - bbox[:, 0] + dists = distancematrix(gridcoords, sphcoords, periodic=periodic, periods=periods) + sml = (ad[("gas", "smoothing_length")]).to("cm") + normkern = cubicspline_python(dists / sml.v[np.newaxis, :]) + sphcontr = normkern / sml[np.newaxis, :] ** 3 * ad[("gas", "mass")] + contsum = np.sum(sphcontr, axis=1) + sphweights = ( + normkern + / sml[np.newaxis, :] ** 3 + * ad[("gas", "mass")] + / ad[("gas", "density")] + ) + weights = np.sum(sphweights, axis=1) + nzeromask = np.logical_not(weights == 0) + expected = np.zeros(weights.shape, weights.dtype) + expected[nzeromask] = contsum[nzeromask] / weights[nzeromask] + expected = expected.reshape(outshape) + # expected[np.isnan(expected)] = 0.0 # convention in the slices + + # print(axis, shiftcenter, depth, periodic, weighted) + # print("expected:\n", expected) + # print("recovered:\n", res.v) + assert_rel_equal(expected, res.v, 4) diff --git a/yt/geometry/tests/test_grid_container.py b/yt/geometry/tests/test_grid_container.py index 
795d299cf5..23a45725ea 100644 --- a/yt/geometry/tests/test_grid_container.py +++ b/yt/geometry/tests/test_grid_container.py @@ -1,10 +1,14 @@ import random +import sys import numpy as np from numpy.testing import assert_equal, assert_raises from yt.loaders import load_amr_grids +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + def setup_test_ds(): """Prepare setup specific environment""" @@ -102,7 +106,7 @@ def test_find_points(): grid_inds = np.zeros((num_points), dtype="int64") - for ind, ixx, iyy, izz in zip(range(num_points), randx, randy, randz): + for ind, ixx, iyy, izz in zip(range(num_points), randx, randy, randz, strict=True): pos = np.array([ixx, iyy, izz]) pt_level = -1 diff --git a/yt/geometry/tests/test_particle_deposit.py b/yt/geometry/tests/test_particle_deposit.py index c922e88ed9..fca774c273 100644 --- a/yt/geometry/tests/test_particle_deposit.py +++ b/yt/geometry/tests/test_particle_deposit.py @@ -2,7 +2,7 @@ import yt from yt.loaders import load -from yt.testing import fake_random_ds, requires_file +from yt.testing import fake_random_ds, requires_file, requires_module from yt.utilities.exceptions import YTBoundsDefinitionError @@ -34,6 +34,7 @@ def test_one_zone_octree_deposit(): assert sp["deposit", "io_cic"].shape == (1,) +@requires_module("h5py") @requires_file(RAMSES) @requires_file(ISOGAL) def test_mesh_sampling(): @@ -52,6 +53,7 @@ def test_mesh_sampling(): assert_array_less(-dist, dx) +@requires_module("h5py") @requires_file(RAMSES) @requires_file(ISOGAL) def test_mesh_sampling_for_filtered_particles(): diff --git a/yt/loaders.py b/yt/loaders.py index 1c05b877ee..77d090b134 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -9,6 +9,7 @@ import time import types import warnings +from collections.abc import Mapping from pathlib import Path from typing import TYPE_CHECKING, Any, Optional, Union, cast from urllib.parse import urlsplit @@ -44,6 +45,9 @@ parallel_root_only_then_broadcast, ) +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + if TYPE_CHECKING: from multiprocessing.connection import Connection @@ -120,7 +124,7 @@ def load( for entrypoint in external_frontends: entrypoint.load() - candidates: list[type["Dataset"]] = [] + candidates: list[type[Dataset]] = [] for cls in output_type_registry.values(): if cls._is_valid(fn, *args, **kwargs): candidates.append(cls) @@ -694,7 +698,7 @@ def load_amr_grids( def load_particles( - data: dict[AnyFieldKey, np.ndarray], + data: Mapping[AnyFieldKey, Union[np.ndarray, tuple[np.ndarray, str]]], length_unit=None, bbox=None, sim_time=None, @@ -798,7 +802,7 @@ def load_particles( le, re = data_source.get_bbox() le = le.to_value("code_length") re = re.to_value("code_length") - bbox = list(zip(le, re)) + bbox = list(zip(le, re, strict=True)) if bbox is None: bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") else: @@ -833,7 +837,7 @@ def parse_unit(unit, dimension): field_units, data, _ = process_data(data) sfh = StreamDictFieldHandler() - pdata: dict[AnyFieldKey, np.ndarray] = {} + pdata: dict[AnyFieldKey, Union[np.ndarray, tuple[np.ndarray, str]]] = {} for key in data.keys(): field: FieldKey if not isinstance(key, tuple): @@ -1382,10 +1386,10 @@ def load_unstructured_mesh( node_data = list(always_iterable(node_data, base_type=dict)) or [{}] * num_meshes data = [{} for i in range(num_meshes)] # type: ignore [var-annotated] - for elem_dict, data_dict in zip(elem_data, data): + for elem_dict, data_dict in zip(elem_data, data, strict=True): for field, values in 
elem_dict.items():
             data_dict[field] = values
-    for node_dict, data_dict in zip(node_data, data):
+    for node_dict, data_dict in zip(node_data, data, strict=True):
         for field, values in node_dict.items():
             data_dict[field] = values
@@ -1918,7 +1922,7 @@ def _reader(grid, field_name):
     grid_data = []
     psize = get_psize(np.array(shape), nchunks)
     left_edges, right_edges, shapes, _, _ = decompose_array(shape, psize, bbox)
-    for le, re, s in zip(left_edges, right_edges, shapes):
+    for le, re, s in zip(left_edges, right_edges, shapes, strict=True):
         data = {_: reader for _ in fields}
         data.update({"left_edge": le, "right_edge": re, "dimensions": s, "level": 0})
         grid_data.append(data)
diff --git a/yt/testing.py b/yt/testing.py
index 2205439ea1..80c238ae1c 100644
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -9,6 +9,7 @@
 from functools import wraps
 from importlib.util import find_spec
 from shutil import which
+from typing import TYPE_CHECKING, Callable, Union
 from unittest import SkipTest

 import matplotlib
@@ -19,10 +20,19 @@
 from yt._maintenance.deprecation import issue_deprecation_warning
 from yt.config import ytcfg
+from yt.frontends.stream.data_structures import StreamParticlesDataset
 from yt.funcs import is_sequence
-from yt.loaders import load
+from yt.loaders import load, load_particles
 from yt.units.yt_array import YTArray, YTQuantity

+if TYPE_CHECKING:
+    from collections.abc import Mapping
+
+    from yt._typing import AnyFieldKey
+
+if sys.version_info < (3, 10):
+    from yt._maintenance.backports import zip
+
 ANSWER_TEST_TAG = "answer_test"
@@ -80,6 +90,111 @@ def assert_rel_equal(a1, a2, decimals, err_msg="", verbose=True):
     )


+# tested: volume integral is 1.
+def cubicspline_python(
+    x: Union[float, np.ndarray],
+) -> np.ndarray:
+    """
+    cubic spline SPH kernel function for testing against more
+    efficient cython methods
+
+    Parameters
+    ----------
+    x:
+        impact parameter / smoothing length [dimensionless]
+
+    Returns
+    -------
+    value of the kernel function
+    """
+    # C is 8/pi
+    _c = 8.0 / np.pi
+    x = np.asarray(x)
+    kernel = np.zeros(x.shape, dtype=x.dtype)
+    half1 = np.where(np.logical_and(x >= 0.0, x <= 0.5))
+    kernel[half1] = 1.0 - 6.0 * x[half1] ** 2 * (1.0 - x[half1])
+    half2 = np.where(np.logical_and(x > 0.5, x <= 1.0))
+    kernel[half2] = 2.0 * (1.0 - x[half2]) ** 3
+    return kernel * _c
+
+
+def integrate_kernel(
+    kernelfunc: Callable[[float], float], b: float, hsml: float
+) -> float:
+    """
+    integrates a kernel function over a line passing entirely
+    through it
+
+    Parameters:
+    -----------
+    kernelfunc:
+        the kernel function to integrate
+    b:
+        impact parameter
+    hsml:
+        smoothing length [same units as impact parameter]
+
+    Returns:
+    --------
+    the integral of the SPH kernel function.
+    units: 1 / units of b and hsml
+    """
+    pre = 1.0 / hsml**2
+    x = b / hsml
+    xmax = np.sqrt(1.0 - x**2)
+    xmin = -1.0 * xmax
+    xe = np.linspace(xmin, xmax, 500)  # shape: 500, x.shape
+    xc = 0.5 * (xe[:-1, ...] + xe[1:, ...])
+    dx = np.diff(xe, axis=0)
+    spv = kernelfunc(np.sqrt(xc**2 + x**2))
+    integral = np.sum(spv * dx, axis=0)
+    return pre * integral
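These two helpers lend themselves to a quick numerical cross-check; a sketch assuming only numpy and the definitions above:

import numpy as np
from yt.testing import cubicspline_python, integrate_kernel

# the C = 8/pi normalization makes the kernel integrate to ~1 over the unit sphere
r = np.linspace(0.0, 1.0, 2001)
w = cubicspline_python(r)
print(np.trapz(4.0 * np.pi * r**2 * w, r))             # -> ~1.0

# central (b=0) line-of-sight integral for hsml=0.5, as used in the tests
print(integrate_kernel(cubicspline_python, 0.0, 0.5))  # -> ~7.64 (units: 1/hsml**2)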
+
+
+_zeroperiods = np.array([0.0, 0.0, 0.0])
+
+
+def distancematrix(
+    pos3_i0: np.ndarray,
+    pos3_i1: np.ndarray,
+    periodic: tuple[bool, bool, bool] = (True,) * 3,
+    periods: np.ndarray = _zeroperiods,
+) -> np.ndarray:
+    """
+    Calculates the distances between two arrays of points.
+
+    Parameters:
+    -----------
+    pos3_i0: shape (first number of points, 3)
+        positions of the first set of points. The second index is
+        for positions along the different cartesian axes
+    pos3_i1: shape (second number of points, 3)
+        as pos3_i0, but for the second set of points
+    periodic:
+        are the positions along each axis periodic (True) or not
+    periods:
+        the periods along each axis. Ignored if positions in a given
+        direction are not periodic.
+
+    Returns:
+    --------
+    a 2D-array of distances between positions `pos3_i0` (changes along
+    index 0) and `pos3_i1` (changes along index 1)
+
+    """
+    d2 = np.zeros((len(pos3_i0), len(pos3_i1)), dtype=pos3_i0.dtype)
+    for ax in range(3):
+        # 'center on' pos3_i1
+        _d = pos3_i0[:, ax, np.newaxis] - pos3_i1[np.newaxis, :, ax]
+        if periodic[ax]:
+            _period = periods[ax]
+            _d += 0.5 * _period  # center on half box size
+            _d %= _period  # wrap coordinate to 0 -- boxsize range
+            _d -= 0.5 * _period  # center back to zero
+        d2 += _d**2
+    return np.sqrt(d2)
+
+
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds
     of an AMR grid as well as an array for the AMR level of each cell.
@@ -270,14 +385,14 @@ def fake_random_ds(
         else:
             offsets.append(0.0)
     data = {}
-    for field, offset, u in zip(fields, offsets, units):
+    for field, offset, u in zip(fields, offsets, units, strict=True):
         v = (prng.random_sample(ndims) - offset) * peak_value
         if field[0] == "all":
             v = v.ravel()
         data[field] = (v, u)
     if particles:
         if particle_fields is not None:
-            for field, unit in zip(particle_fields, particle_field_units):
+            for field, unit in zip(particle_fields, particle_field_units, strict=True):
                 if field in ("particle_position", "particle_velocity"):
                     data["io", field] = (prng.random_sample((int(particles), 3)), unit)
                 else:
@@ -347,7 +462,7 @@ def fake_amr_ds(
             "right_edge": right_edge,
             "dimensions": dims,
         }
-        for f, u in zip(fields, units):
+        for f, u in zip(fields, units, strict=True):
             gdata[f] = (prng.random_sample(dims), u)
         if particles:
             for i, f in enumerate(f"particle_position_{ax}" for ax in "xyz"):
@@ -412,7 +527,7 @@ def fake_particle_ds(
         else:
             offsets.append(0.0)
     data = data if data else {}
-    for field, offset, u in zip(fields, offsets, units):
+    for field, offset, u in zip(fields, offsets, units, strict=True):
        if field in data:
            v = data[field]
            continue
@@ -686,6 +801,220 @@ def fake_sph_grid_ds(hsml_factor=1.0):
     return load_particles(data=data, length_unit=1.0, bbox=bbox)


+def constantmass(i: int, j: int, k: int) -> float:
+    return 1.0
+
+
+_xhat = np.array([1, 0, 0])
+_yhat = np.array([0, 1, 0])
+_zhat = np.array([0, 0, 1])
+_floathalves = 0.5 * np.ones((3,), dtype=np.float64)
+
+
+def fake_sph_flexible_grid_ds(
+    hsml_factor: float = 1.0,
+    nperside: int = 3,
+    periodic: bool = True,
+    e1hat: np.ndarray = _xhat,
+    e2hat: np.ndarray = _yhat,
+    e3hat: np.ndarray = _zhat,
+    offsets: np.ndarray = _floathalves,
+    massgenerator: Callable[[int, int, int], float] = constantmass,
+    unitrho: float = 1.0,
+    bbox: Union[np.ndarray, None] = None,
+    recenter: Union[np.ndarray, None] = None,
+) -> StreamParticlesDataset:
+    """Returns an in-memory SPH dataset useful for testing
+
+    Parameters:
+    -----------
+    hsml_factor:
+        all particles have smoothing lengths of `hsml_factor` * 0.5
+    nperside:
+        the dataset will have `nperside`**3 particles, arranged
+        uniformly on a 3D grid
+    periodic:
+        are the positions taken to be periodic? (applies to all
+        coordinate axes)
+    e1hat: shape (3,)
+        the first basis vector defining the 3D grid. If the basis
+        vectors are not normalized to 1 or not orthogonal, the spacing
+        or overlap between SPH particles will be affected, but this is
+        allowed.
+    e2hat: shape (3,)
+        the second basis vector defining the 3D grid. (See `e1hat`.)
+    e3hat: shape (3,)
+        the third basis vector defining the 3D grid. (See `e1hat`.)
+    offsets: shape (3,)
+        the zero point of the 3D grid along each coordinate axis
+    massgenerator:
+        a function assigning a mass to each particle, as a function of
+        the e[1-3]hat indices, in order
+    unitrho:
+        defines the density for a particle with mass 1 ('g'), and the
+        standard (uniform) grid `hsml_factor`.
+    bbox: if np.ndarray, shape is (3, 2)
+        the assumed enclosing volume of the particles. Should enclose
+        all the coordinate values. If not specified, a bbox is defined
+        which encloses all coordinate values with a margin. If
+        `periodic`, the size of the `bbox` along each coordinate is
+        also the period along that axis.
+    recenter:
+        if not `None`, after generating the grid, the positions are
+        periodically shifted to move the old center to this position.
+        Useful for testing periodicity handling.
+        This shift is relative to the halfway positions of the bbox
+        edges.
+
+    Returns:
+    --------
+    A `StreamParticlesDataset` object with particle positions, masses,
+    velocities (zero), smoothing lengths, and densities specified.
+    Values are in cgs units.
+    """
+
+    npart = nperside**3
+
+    pos = np.empty((npart, 3), dtype=np.float64)
+    mass = np.empty((npart,), dtype=np.float64)
+    for i in range(0, nperside):
+        for j in range(0, nperside):
+            for k in range(0, nperside):
+                _pos = (
+                    (offsets[0] + i) * e1hat
+                    + (offsets[1] + j) * e2hat
+                    + (offsets[2] + k) * e3hat
+                )
+                ind = nperside**2 * i + nperside * j + k
+                pos[ind, :] = _pos
+                mass[ind] = massgenerator(i, j, k)
+    rho = unitrho * mass
+
+    if bbox is None:
+        eps = 1e-3
+        margin = (1.0 + eps) * hsml_factor
+        bbox = np.array(
+            [
+                [np.min(pos[:, 0]) - margin, np.max(pos[:, 0]) + margin],
+                [np.min(pos[:, 1]) - margin, np.max(pos[:, 1]) + margin],
+                [np.min(pos[:, 2]) - margin, np.max(pos[:, 2]) + margin],
+            ]
+        )
+
+    if recenter is not None:
+        periods = bbox[:, 1] - bbox[:, 0]
+        # old center -> new position
+        pos += -0.5 * periods[np.newaxis, :] + recenter[np.newaxis, :]
+        # wrap coordinates -> all in [0, boxsize) range
+        pos %= periods[np.newaxis, :]
+        # shift back to original bbox range
+        pos += (bbox[:, 0])[np.newaxis, :]
+    if not periodic:
+        # remove points outside bbox to avoid errors:
+        okinds = np.ones(len(mass), dtype=bool)
+        for ax in [0, 1, 2]:
+            okinds &= pos[:, ax] < bbox[ax, 1]
+            okinds &= pos[:, ax] >= bbox[ax, 0]
+        npart = sum(okinds)
+    else:
+        okinds = np.ones((npart,), dtype=bool)
+
+    data: Mapping[AnyFieldKey, tuple[np.ndarray, str]] = {
+        "particle_position_x": (np.copy(pos[okinds, 0]), "cm"),
+        "particle_position_y": (np.copy(pos[okinds, 1]), "cm"),
+        "particle_position_z": (np.copy(pos[okinds, 2]), "cm"),
+        "particle_mass": (np.copy(mass[okinds]), "g"),
+        "particle_velocity_x": (np.zeros(npart), "cm/s"),
+        "particle_velocity_y": (np.zeros(npart), "cm/s"),
+        "particle_velocity_z": (np.zeros(npart), "cm/s"),
+        "smoothing_length": (np.ones(npart) * 0.5 * hsml_factor, "cm"),
+        "density": (np.copy(rho[okinds]), "g/cm**3"),
+    }
+
+    ds = load_particles(
+        data=data,
+        bbox=bbox,
+        periodicity=(periodic,) * 3,
+        length_unit=1.0,
+        mass_unit=1.0,
+        time_unit=1.0,
+        velocity_unit=1.0,
+    )
+    ds.kernel_name = "cubic"
+    return ds
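A minimal usage sketch for this helper (the expected values follow from the defaults documented above):

from yt.testing import fake_sph_flexible_grid_ds

ds = fake_sph_flexible_grid_ds(hsml_factor=0.5, nperside=3)
ad = ds.all_data()
assert ad["gas", "mass"].size == 3**3                   # nperside**3 particles
assert float(ad["gas", "smoothing_length"][0]) == 0.25  # 0.5 * hsml_factor, in cm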
+
+
+def fake_random_sph_ds(
+    npart: int,
+    bbox: np.ndarray,
+    periodic: Union[bool, tuple[bool, bool, bool]] = True,
+    massrange: tuple[float, float] = (0.5, 2.0),
+    hsmlrange: tuple[float, float] = (0.5, 2.0),
+    unitrho: float = 1.0,
+) -> StreamParticlesDataset:
+    """Returns an in-memory SPH dataset useful for testing
+
+    Parameters:
+    -----------
+    npart:
+        number of particles to generate
+    bbox: shape: (3, 2), units: "cm"
+        the assumed enclosing volume of the particles. Particle
+        positions are drawn uniformly from these ranges.
+    periodic:
+        are the positions taken to be periodic? If a single value,
+        that value is applied to all axes
+    massrange:
+        particle masses are drawn uniformly from this range (unit: "g")
+    hsmlrange: units: "cm"
+        particle smoothing lengths are drawn uniformly from this range
+    unitrho:
+        defines the density for a particle with mass 1 ("g"), and
+        smoothing length 1 ("cm").
+
+    Returns:
+    --------
+    A `StreamParticlesDataset` object with particle positions, masses,
+    velocities (zero), smoothing lengths, and densities specified.
+    Values are in cgs units.
+    """
+
+    if not hasattr(periodic, "__len__"):
+        periodic = (periodic,) * 3
+    gen = np.random.default_rng(seed=0)
+
+    posx = gen.uniform(low=bbox[0][0], high=bbox[0][1], size=npart)
+    posy = gen.uniform(low=bbox[1][0], high=bbox[1][1], size=npart)
+    posz = gen.uniform(low=bbox[2][0], high=bbox[2][1], size=npart)
+    mass = gen.uniform(low=massrange[0], high=massrange[1], size=npart)
+    hsml = gen.uniform(low=hsmlrange[0], high=hsmlrange[1], size=npart)
+    dens = mass / hsml**3 * unitrho
+
+    data: Mapping[AnyFieldKey, tuple[np.ndarray, str]] = {
+        "particle_position_x": (posx, "cm"),
+        "particle_position_y": (posy, "cm"),
+        "particle_position_z": (posz, "cm"),
+        "particle_mass": (mass, "g"),
+        "particle_velocity_x": (np.zeros(npart), "cm/s"),
+        "particle_velocity_y": (np.zeros(npart), "cm/s"),
+        "particle_velocity_z": (np.zeros(npart), "cm/s"),
+        "smoothing_length": (hsml, "cm"),
+        "density": (dens, "g/cm**3"),
+    }
+
+    ds = load_particles(
+        data=data,
+        bbox=bbox,
+        periodicity=periodic,
+        length_unit=1.0,
+        mass_unit=1.0,
+        time_unit=1.0,
+        velocity_unit=1.0,
+    )
+    ds.kernel_name = "cubic"
+    return ds
+
+
 def construct_octree_mask(prng=RandomState(0x1D3D3D3), refined=None):  # noqa B008
     # Implementation taken from url:
     # http://docs.hyperion-rt.org/en/stable/advanced/indepth_oct.html
@@ -856,7 +1185,7 @@ def expand_keywords(keywords, full=False):
     keys = sorted(keywords)
     list_of_kwarg_dicts = np.array(
         [
-            dict(zip(keys, prod))
+            dict(zip(keys, prod, strict=True))
             for prod in it.product(*(keywords[key] for key in keys))
         ]
     )
diff --git a/yt/tests/test_load_sample.py b/yt/tests/test_load_sample.py
index c5b5189bc4..f76ba4d7f8 100644
--- a/yt/tests/test_load_sample.py
+++ b/yt/tests/test_load_sample.py
@@ -109,6 +109,12 @@ def test_load_sample_small_dataset(
     )


+@pytest.mark.skipif(
+    sys.platform.startswith("win"),
+    # flakiness is probably due to Windows' infamous lack of time resolution;
+    # overall, this test doesn't seem worth it.
+ reason="This test is flaky on Windows", +) @requires_module_pytest("pandas", "pooch") @pytest.mark.usefixtures("capturable_logger") def test_load_sample_timeout(tmp_data_dir, caplog): diff --git a/yt/utilities/amr_kdtree/amr_kdtools.py b/yt/utilities/amr_kdtree/amr_kdtools.py index f42e3a36f7..01d7327a26 100644 --- a/yt/utilities/amr_kdtree/amr_kdtools.py +++ b/yt/utilities/amr_kdtree/amr_kdtools.py @@ -3,13 +3,17 @@ from yt.funcs import mylog -def receive_and_reduce(comm, incoming_rank, image, add_to_front): +def receive_and_reduce(comm, incoming_rank, image, add_to_front, *, use_opacity=True): mylog.debug("Receiving image from %04i", incoming_rank) # mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner)) arr2 = comm.recv_array(incoming_rank, incoming_rank).reshape( (image.shape[0], image.shape[1], image.shape[2]) ) + if not use_opacity: + np.add(image, arr2, image) + return image + if add_to_front: front = arr2 back = image @@ -31,6 +35,7 @@ def receive_and_reduce(comm, incoming_rank, image, add_to_front): for i in range(4): np.multiply(image[:, :, i], ta, image[:, :, i]) np.add(image, front, image) + return image diff --git a/yt/utilities/amr_kdtree/amr_kdtree.py b/yt/utilities/amr_kdtree/amr_kdtree.py index b8378c6671..861f4320c1 100644 --- a/yt/utilities/amr_kdtree/amr_kdtree.py +++ b/yt/utilities/amr_kdtree/amr_kdtree.py @@ -294,7 +294,7 @@ def get_reduce_owners(self): owners[temp.node_id] = owners[temp.left.node_id] return owners - def reduce_tree_images(self, image, viewpoint): + def reduce_tree_images(self, image, viewpoint, *, use_opacity=True): if self.comm.size <= 1: return image myrank = self.comm.rank @@ -307,7 +307,11 @@ def reduce_tree_images(self, image, viewpoint): split_pos = node.parent.get_split_pos() add_to_front = viewpoint[split_dim] >= split_pos image = receive_and_reduce( - self.comm, owners[node.parent.right.node_id], image, add_to_front + self.comm, + owners[node.parent.right.node_id], + image, + add_to_front, + use_opacity=use_opacity, ) if node.parent.node_id == 1: break diff --git a/yt/utilities/answer_testing/framework.py b/yt/utilities/answer_testing/framework.py index 37358573dd..0421ef0d72 100644 --- a/yt/utilities/answer_testing/framework.py +++ b/yt/utilities/answer_testing/framework.py @@ -47,6 +47,10 @@ profile_plotter as profile_plotter, ) +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + + mylog = logging.getLogger("nose.plugins.answer-testing") run_big_data = False @@ -769,16 +773,24 @@ def run(self): return result def compare(self, new_result, old_result): - for newp, oldp in zip(new_result["parents"], old_result["parents"]): + for newp, oldp in zip( + new_result["parents"], + old_result["parents"], + strict=True, + ): assert newp == oldp - for newc, oldc in zip(new_result["children"], old_result["children"]): + for newc, oldc in zip( + new_result["children"], + old_result["children"], + strict=True, + ): assert newc == oldc def dump_images(new_result, old_result, decimals=10): - tmpfd, old_image = tempfile.mkstemp(suffix=".png") + tmpfd, old_image = tempfile.mkstemp(prefix="baseline_", suffix=".png") os.close(tmpfd) - tmpfd, new_image = tempfile.mkstemp(suffix=".png") + tmpfd, new_image = tempfile.mkstemp(prefix="thisPR_", suffix=".png") os.close(tmpfd) image_writer.write_projection(new_result, new_image) image_writer.write_projection(old_result, old_image) @@ -840,7 +852,10 @@ def compare_image_lists(new_result, old_result, decimals): line.strip() for line in results.split("\n") if 
line.endswith(".png") ] for fn, img, padded in zip( - tempfiles, (expected, actual), (expected_p, actual_p) + tempfiles, + (expected, actual), + (expected_p, actual_p), + strict=True, ): # padded images are convenient for comparison # but what we really want to store and upload diff --git a/yt/utilities/configuration_tree.py b/yt/utilities/configuration_tree.py index 06a561941a..c3b87e86c3 100644 --- a/yt/utilities/configuration_tree.py +++ b/yt/utilities/configuration_tree.py @@ -171,7 +171,7 @@ def value(self): @value.setter def value(self, new_value): - if type(self.value) == type(new_value): + if type(self.value) is type(new_value): self._value = new_value else: tree = self.get_tree() diff --git a/yt/utilities/cosmology.py b/yt/utilities/cosmology.py index cb415bc766..691d86e1c4 100644 --- a/yt/utilities/cosmology.py +++ b/yt/utilities/cosmology.py @@ -95,7 +95,7 @@ def __init__( new_unit, my_u.base_value, dimensions.length, - "\\rm{%s}/(1+z)" % my_unit, + f"\\rm{{{my_unit}}}/(1+z)", prefixable=True, ) self.unit_registry = unit_registry diff --git a/yt/utilities/fortran_utils.py b/yt/utilities/fortran_utils.py index 32956767e8..10ff51ea29 100644 --- a/yt/utilities/fortran_utils.py +++ b/yt/utilities/fortran_utils.py @@ -73,11 +73,9 @@ def read_attrs(f, attrs, endian="="): raise OSError( "An error occurred while reading a Fortran " "record. Record length is not equal to expected " - "length: %s %s", - len(a), - len(v), + f"length: {len(a)} {len(v)}" ) - for k, val in zip(a, v): + for k, val in zip(a, v, strict=True): vv[k] = val else: vv[a] = v @@ -140,12 +138,9 @@ def read_cattrs(f, attrs, endian="="): raise OSError( "An error occurred while reading a Fortran " "record. Record length is not equal to expected " - "length: %s %s", - len(a), - len(v), + f"length: {len(a)} {len(v)}" ) - - for k, val in zip(a, v): + for k, val in zip(a, v, strict=True): vv[k] = val else: vv[a] = v diff --git a/yt/utilities/lib/misc_utilities.pyx b/yt/utilities/lib/misc_utilities.pyx index 88e1d7e326..2a5e61efa0 100644 --- a/yt/utilities/lib/misc_utilities.pyx +++ b/yt/utilities/lib/misc_utilities.pyx @@ -921,7 +921,7 @@ def fill_region_float(np.ndarray[np.float64_t, ndim=2] fcoords, if (sp[1] + odsp[1] < LE[1]) or (sp[1] - odsp[1] > RE[1]): continue for zi in range(2): if diter[2][zi] == 999: continue - sp[2] = osp[2] + diterv[2][yi] + sp[2] = osp[2] + diterv[2][zi] if (sp[2] + odsp[2] < LE[2]) or (sp[2] - odsp[2] > RE[2]): continue for i in range(3): ld[i] = fmax(((sp[i]-odsp[i]-LE[i])*box_idds[i]),0) diff --git a/yt/utilities/lib/pixelization_routines.pyx b/yt/utilities/lib/pixelization_routines.pyx index 8cc37ab1d1..a207473e9e 100644 --- a/yt/utilities/lib/pixelization_routines.pyx +++ b/yt/utilities/lib/pixelization_routines.pyx @@ -1123,6 +1123,7 @@ def pixelize_sph_kernel_projection( np.uint8_t[:, :] mask, any_float[:] posx, any_float[:] posy, + any_float[:] posz, any_float[:] hsml, any_float[:] pmass, any_float[:] pdens, @@ -1130,21 +1131,24 @@ def pixelize_sph_kernel_projection( bounds, kernel_name="cubic", weight_field=None, - int check_period=1, + _check_period = (1, 1, 1), period=None): cdef np.intp_t xsize, ysize - cdef np.float64_t x_min, x_max, y_min, y_max, prefactor_j + cdef np.float64_t x_min, x_max, y_min, y_max, z_min, z_max, prefactor_j cdef np.int64_t xi, yi, x0, x1, y0, y1, xxi, yyi cdef np.float64_t q_ij2, posx_diff, posy_diff, ih_j2 - cdef np.float64_t x, y, dx, dy, idx, idy, h_j2, px, py - cdef np.float64_t period_x = 0, period_y = 0 - cdef int i, j, ii, jj + cdef np.float64_t x, y, 
dx, dy, idx, idy, h_j2, px, py, pz
+    cdef np.float64_t period_x = 0, period_y = 0, period_z = 0
+    cdef int i, j, ii, jj, kk
     cdef np.float64_t[:] _weight_field
     cdef int * xiter
     cdef int * yiter
+    cdef int * ziter
     cdef np.float64_t * xiterv
     cdef np.float64_t * yiterv
+    cdef np.float64_t * ziterv
+    cdef np.int8_t[3] check_period

     if weight_field is not None:
         _weight_field = weight_field
@@ -1152,7 +1156,9 @@ def pixelize_sph_kernel_projection(
     if period is not None:
         period_x = period[0]
         period_y = period[1]
-
+        period_z = period[2]
+    for i in range(3):
+        check_period[i] = np.int8(_check_period[i])
     # we find the x and y range over which we have pixels and we find how many
     # pixels we have in each dimension
     xsize, ysize = buff.shape[0], buff.shape[1]
@@ -1160,6 +1166,8 @@
     x_max = bounds[1]
     y_min = bounds[2]
     y_max = bounds[3]
+    z_min = bounds[4]
+    z_max = bounds[5]

     dx = (x_max - x_min) / xsize
     dy = (y_max - y_min) / ysize
@@ -1170,7 +1178,6 @@
     if kernel_name not in kernel_tables:
         kernel_tables[kernel_name] = SPHKernelInterpolationTable(kernel_name)
     cdef SPHKernelInterpolationTable itab = kernel_tables[kernel_name]
-
     with nogil, parallel():
         # loop through every particle
         # NOTE: this loop can be quite time consuming. However it is easily
@@ -1190,10 +1197,12 @@
             local_buff = malloc(sizeof(np.float64_t) * xsize * ysize)
             xiterv = malloc(sizeof(np.float64_t) * 2)
             yiterv = malloc(sizeof(np.float64_t) * 2)
+            ziterv = malloc(sizeof(np.float64_t) * 2)
             xiter = malloc(sizeof(int) * 2)
             yiter = malloc(sizeof(int) * 2)
-            xiter[0] = yiter[0] = 0
-            xiterv[0] = yiterv[0] = 0.0
+            ziter = malloc(sizeof(int) * 2)
+            xiter[0] = yiter[0] = ziter[0] = 0
+            xiterv[0] = yiterv[0] = ziterv[0] = 0.0
             for i in range(xsize * ysize):
                 local_buff[i] = 0.0
@@ -1202,75 +1211,98 @@
                     with gil:
                         PyErr_CheckSignals()

-                xiter[1] = yiter[1] = 999
+                xiter[1] = yiter[1] = ziter[1] = 999

-                if check_period == 1:
+                if check_period[0] == 1:
                     if posx[j] - hsml[j] < x_min:
                         xiter[1] = +1
                         xiterv[1] = period_x
                     elif posx[j] + hsml[j] > x_max:
                         xiter[1] = -1
                         xiterv[1] = -period_x
+                if check_period[1] == 1:
                     if posy[j] - hsml[j] < y_min:
                         yiter[1] = +1
                         yiterv[1] = period_y
                     elif posy[j] + hsml[j] > y_max:
                         yiter[1] = -1
                         yiterv[1] = -period_y
+                if check_period[2] == 1:
+                    if posz[j] - hsml[j] < z_min:
+                        ziter[1] = +1
+                        ziterv[1] = period_z
+                    elif posz[j] + hsml[j] > z_max:
+                        ziter[1] = -1
+                        ziterv[1] = -period_z

                 # we set the smoothing length squared with lower limit of the pixel
-                h_j2 = fmax(hsml[j]*hsml[j], dx*dy)
+                # note: imposing the pixel area as a lower limit here causes
+                # weird grid resolution dependencies and inflates total values
+                # when resolution elements have hsml < grid spacing
+                h_j2 = hsml[j]*hsml[j]
                 ih_j2 = 1.0/h_j2

                 prefactor_j = pmass[j] / pdens[j] / hsml[j]**2 * quantity_to_smooth[j]
                 if weight_field is not None:
                     prefactor_j *= _weight_field[j]

-                for ii in range(2):
-                    if xiter[ii] == 999: continue
-                    px = posx[j] + xiterv[ii]
-                    if (px + hsml[j] < x_min) or (px - hsml[j] > x_max): continue
-                    for jj in range(2):
-                        if yiter[jj] == 999: continue
-                        py = posy[j] + yiterv[jj]
-                        if (py + hsml[j] < y_min) or (py - hsml[j] > y_max): continue
-
-                        # here we find the pixels which this particle contributes to
-                        x0 = ((px - hsml[j] - x_min)*idx)
-                        x1 = ((px + hsml[j] - x_min)*idx)
-                        x0 = iclip(x0-1, 0, xsize)
-                        x1 = iclip(x1+1, 0, xsize)

+                # Discussion point: do we want the hsml margin in the z direction?
+                # I think it is consistent with Ray and Region selections,
+                # but it does tend to 'tack on' material beyond the nominal depth
+                for kk in range(2):
+                    # discard if z is outside bounds
+                    if ziter[kk] == 999: continue
+                    pz = posz[j] + ziterv[kk]
+                    ## removed hsml 'margin' in the projection direction to avoid
+                    ## double-counting particles near periodic edges
+                    ## and adding extra 'depth' to projections
+                    #if (pz + hsml[j] < z_min) or (pz - hsml[j] > z_max): continue
+                    if (pz < z_min) or (pz > z_max): continue
+
+                    for ii in range(2):
+                        if xiter[ii] == 999: continue
+                        px = posx[j] + xiterv[ii]
+                        if (px + hsml[j] < x_min) or (px - hsml[j] > x_max): continue
+                        for jj in range(2):
+                            if yiter[jj] == 999: continue
+                            py = posy[j] + yiterv[jj]
+                            if (py + hsml[j] < y_min) or (py - hsml[j] > y_max): continue
+
+                            # here we find the pixels which this particle contributes to
+                            x0 = ((px - hsml[j] - x_min)*idx)
+                            x1 = ((px + hsml[j] - x_min)*idx)
+                            x0 = iclip(x0-1, 0, xsize)
+                            x1 = iclip(x1+1, 0, xsize)
-                        y0 = ((py - hsml[j] - y_min)*idy)
-                        y1 = ((py + hsml[j] - y_min)*idy)
-                        y0 = iclip(y0-1, 0, ysize)
-                        y1 = iclip(y1+1, 0, ysize)
+                            y0 = ((py - hsml[j] - y_min)*idy)
+                            y1 = ((py + hsml[j] - y_min)*idy)
+                            y0 = iclip(y0-1, 0, ysize)
+                            y1 = iclip(y1+1, 0, ysize)
-                        # found pixels we deposit on, loop through those pixels
-                        for xi in range(x0, x1):
-                            # we use the centre of the pixel to calculate contribution
-                            x = (xi + 0.5) * dx + x_min
+                            # found pixels we deposit on, loop through those pixels
+                            for xi in range(x0, x1):
+                                # we use the centre of the pixel to calculate contribution
+                                x = (xi + 0.5) * dx + x_min
-                            posx_diff = px - x
-                            posx_diff = posx_diff * posx_diff
+                                posx_diff = px - x
+                                posx_diff = posx_diff * posx_diff
-                            if posx_diff > h_j2: continue
+                                if posx_diff > h_j2: continue
-                            for yi in range(y0, y1):
-                                y = (yi + 0.5) * dy + y_min
+                                for yi in range(y0, y1):
+                                    y = (yi + 0.5) * dy + y_min
-                                posy_diff = py - y
-                                posy_diff = posy_diff * posy_diff
-                                if posy_diff > h_j2: continue
+                                    posy_diff = py - y
+                                    posy_diff = posy_diff * posy_diff
+                                    if posy_diff > h_j2: continue
-                                q_ij2 = (posx_diff + posy_diff) * ih_j2
-                                if q_ij2 >= 1:
-                                    continue
+                                    q_ij2 = (posx_diff + posy_diff) * ih_j2
+                                    if q_ij2 >= 1:
+                                        continue
-                                # see equation 32 of the SPLASH paper
-                                # now we just use the kernel projection
-                                local_buff[xi + yi*xsize] += prefactor_j * itab.interpolate(q_ij2)
-                                mask[xi, yi] = 1
+                                    # see equation 32 of the SPLASH paper
+                                    # now we just use the kernel projection
+                                    local_buff[xi + yi*xsize] += prefactor_j * itab.interpolate(q_ij2)
+                                    mask[xi, yi] = 1

         with gil:
             for xxi in range(xsize):
@@ -1466,32 +1498,46 @@ def pixelize_sph_kernel_slice(
         np.float64_t[:, :] buff,
         np.uint8_t[:, :] mask,
         np.float64_t[:] posx, np.float64_t[:] posy,
+        np.float64_t[:] posz,
         np.float64_t[:] hsml, np.float64_t[:] pmass,
         np.float64_t[:] pdens,
         np.float64_t[:] quantity_to_smooth,
-        bounds, kernel_name="cubic",
-        int check_period=1,
+        bounds,
+        np.float64_t slicez,
+        kernel_name="cubic",
+        _check_period = (1, 1, 1),
         period=None):
-
+    # bounds are [x0, x1, y0, y1]; slicez is the single coordinate
+    # of the slice along the normal direction.
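+    # Schematically, each particle j adds to a pixel centred on (x, y) as
+    #     d2 = (px - x)**2 + (py - y)**2 + (pz - slicez)**2
+    #     if d2 < hsml[j]**2:
+    #         buff[xi, yi] += (pmass[j] / pdens[j] / hsml[j]**3
+    #                          * quantity_to_smooth[j]
+    #                          * kernel(sqrt(d2) / hsml[j]))
+    # (illustrative pseudocode only; the actual loops below also handle
+    # periodic wrapping and clip the pixel ranges per particle)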
    # similar method to pixelize_sph_kernel_projection
    cdef np.intp_t xsize, ysize
    cdef np.float64_t x_min, x_max, y_min, y_max, prefactor_j
    cdef np.int64_t xi, yi, x0, x1, y0, y1, xxi, yyi
-    cdef np.float64_t q_ij, posx_diff, posy_diff, ih_j
-    cdef np.float64_t x, y, dx, dy, idx, idy, h_j2, h_j, px, py
+    cdef np.float64_t q_ij, posx_diff, posy_diff, posz_diff, ih_j
+    cdef np.float64_t x, y, dx, dy, idx, idy, h_j2, h_j, px, py, pz
    cdef int i, j, ii, jj
-    cdef np.float64_t period_x = 0, period_y = 0
+    cdef np.float64_t period_x = 0, period_y = 0, period_z = 0
    cdef int * xiter
    cdef int * yiter
    cdef np.float64_t * xiterv
    cdef np.float64_t * yiterv
+    cdef np.int8_t[3] check_period

    if period is not None:
        period_x = period[0]
        period_y = period[1]
+        period_z = period[2]
+    for i in range(3):
+        check_period[i] = np.int8(_check_period[i])

    xsize, ysize = buff.shape[0], buff.shape[1]
-
    x_min = bounds[0]
    x_max = bounds[1]
    y_min = bounds[2]
@@ -1503,7 +1549,7 @@ def pixelize_sph_kernel_slice(
    idy = 1.0/dy

    kernel = get_kernel_func(kernel_name)

    with nogil, parallel():
        # NOTE see note in pixelize_sph_kernel_projection
        local_buff = malloc(sizeof(np.float64_t) * xsize * ysize)
@@ -1520,27 +1566,41 @@
            if j % 100000 == 0:
                with gil:
                    PyErr_CheckSignals()

            xiter[1] = yiter[1] = 999
-
-            if check_period == 1:
+            pz = posz[j]
+            if check_period[0] == 1:
                if posx[j] - hsml[j] < x_min:
-                    xiter[1] = +1
+                    xiter[1] = 1
                    xiterv[1] = period_x
                elif posx[j] + hsml[j] > x_max:
                    xiter[1] = -1
                    xiterv[1] = -period_x
+            if check_period[1] == 1:
                if posy[j] - hsml[j] < y_min:
-                    yiter[1] = +1
+                    yiter[1] = 1
                    yiterv[1] = period_y
                elif posy[j] + hsml[j] > y_max:
                    yiter[1] = -1
                    yiterv[1] = -period_y
-
-            h_j2 = fmax(hsml[j]*hsml[j], dx*dy)
-            h_j = math.sqrt(h_j2)
+            if check_period[2] == 1:
+                # z of particle might be < hsml from the slice plane
+                # but across a periodic boundary
+                if posz[j] - hsml[j] > slicez:
+                    pz = posz[j] - period_z
+                elif posz[j] + hsml[j] < slicez:
+                    pz = posz[j] + period_z
+
+            h_j2 = hsml[j] * hsml[j]  # no pixel-size lower limit (see above)
+            h_j = hsml[j]
            ih_j = 1.0/h_j

+            posz_diff = pz - slicez
+            posz_diff = posz_diff * posz_diff
+            if posz_diff > h_j2:
+                continue
+
            prefactor_j = pmass[j] / pdens[j] / hsml[j]**3
            prefactor_j *= quantity_to_smooth[j]
@@ -1562,7 +1622,8 @@
                y1 = ( (py + hsml[j] - y_min) * idy)
                y0 = iclip(y0-1, 0, ysize)
                y1 = iclip(y1+1, 0, ysize)

                # Now we know which pixels to deposit onto for this particle,
                # so loop over them and add this particle's contribution
                for xi in range(x0, x1):
@@ -1582,7 +1643,9 @@
                        continue

                    # see equation 4 of the SPLASH paper
-                    q_ij = math.sqrt(posx_diff + posy_diff) * ih_j
+                    q_ij = math.sqrt(posx_diff
+                                     + posy_diff
+                                     + posz_diff) * ih_j
                    if q_ij >= 1:
                        continue
@@ -1610,13 +1673,14 @@ def pixelize_sph_kernel_arbitrary_grid(np.float64_t[:, :, :] buff,
                                       np.float64_t[:] pdens,
                                       np.float64_t[:] quantity_to_smooth,
                                       bounds, pbar=None, kernel_name="cubic",
-                                       int check_period=1, period=None):
+                                       check_period=True, period=None):

    cdef np.intp_t xsize, ysize, zsize
    cdef np.float64_t x_min, x_max, y_min, y_max, z_min, z_max, prefactor_j
    cdef np.int64_t xi, yi, zi, x0, x1, y0, y1, z0, z1
    cdef np.float64_t q_ij, posx_diff, posy_diff, posz_diff, px, py, pz
-    cdef np.float64_t x, y, z, dx, dy, dz, idx, idy, idz, h_j3, h_j2, h_j, ih_j
+    cdef np.float64_t x, y, z, dx, dy, dz, idx, idy,
idz, h_j2, h_j, ih_j
    cdef int j, ii, jj, kk
    cdef np.float64_t period_x = 0, period_y = 0, period_z = 0
@@ -1626,10 +1690,21 @@ def pixelize_sph_kernel_arbitrary_grid(np.float64_t[:, :, :] buff,
    cdef np.float64_t xiterv[2]
    cdef np.float64_t yiterv[2]
    cdef np.float64_t ziterv[2]
+    cdef int[3] periodic
+
    xiter[0] = yiter[0] = ziter[0] = 0
    xiterv[0] = yiterv[0] = ziterv[0] = 0.0
+    if hasattr(check_period, "__len__"):
+        periodic[0] = int(check_period[0])
+        periodic[1] = int(check_period[1])
+        periodic[2] = int(check_period[2])
+    else:
+        _cp = int(check_period)
+        periodic[0] = _cp
+        periodic[1] = _cp
+        periodic[2] = _cp
    if period is not None:
        period_x = period[0]
        period_y = period[1]
@@ -1652,6 +1727,16 @@
    kernel = get_kernel_func(kernel_name)

+    # nogil seems dangerous here, but there are no actual parallel
+    # sections (e.g., prange instead of range) used here.
+    # However, for future writers:
+    # !! the final buff array mutation has no protections against
+    # !! race conditions (e.g., OpenMP's atomic read/write), and
+    # !! cython doesn't seem to provide such options.
+    # (other routines in this file use private variable buffer arrays
+    # and add everything together at the end, but grid arrays can get
+    # big fast, and having such a large array in each thread could
+    # cause memory use issues.)
    with nogil:
        # TODO make this parallel without using too much memory
        for j in range(0, posx.shape[0]):
@@ -1660,23 +1745,26 @@
                    if(pbar is not None):
                        pbar.update(50000)
                    PyErr_CheckSignals()
+                # end with gil
            xiter[1] = yiter[1] = ziter[1] = 999
            xiterv[1] = yiterv[1] = ziterv[1] = 0.0

-            if check_period == 1:
+            if periodic[0] == 1:
                if posx[j] - hsml[j] < x_min:
                    xiter[1] = +1
                    xiterv[1] = period_x
                elif posx[j] + hsml[j] > x_max:
                    xiter[1] = -1
                    xiterv[1] = -period_x
+            if periodic[1] == 1:
                if posy[j] - hsml[j] < y_min:
                    yiter[1] = +1
                    yiterv[1] = period_y
                elif posy[j] + hsml[j] > y_max:
                    yiter[1] = -1
                    yiterv[1] = -period_y
+            if periodic[2] == 1:
                if posz[j] - hsml[j] < z_min:
                    ziter[1] = +1
                    ziterv[1] = period_z
@@ -1684,8 +1772,8 @@
                    ziter[1] = -1
                    ziterv[1] = -period_z

-            h_j3 = fmax(hsml[j]*hsml[j]*hsml[j], dx*dy*dz)
-            h_j = math.cbrt(h_j3)
+            h_j = hsml[j]  # no cell-volume lower limit (see note above)
            h_j2 = h_j*h_j
            ih_j = 1/h_j
@@ -1746,11 +1834,17 @@
                        continue

                    # see equation 4 of the SPLASH paper
-                    q_ij = math.sqrt(posx_diff + posy_diff + posz_diff) * ih_j
+                    q_ij = math.sqrt(posx_diff
+                                     + posy_diff
+                                     + posz_diff) * ih_j
                    if q_ij >= 1:
                        continue
-
-                    buff[xi, yi, zi] += prefactor_j * kernel(q_ij)
+                    # shared variable buff should not
+                    # be mutated in a nogil section
+                    # where different threads may change
+                    # the same array element
+                    buff[xi, yi, zi] += prefactor_j \
+                        * kernel(q_ij)

def pixelize_element_mesh_line(np.ndarray[np.float64_t, ndim=2] coords,
@@ -1857,16 +1951,16 @@ def pixelize_element_mesh_line(np.ndarray[np.float64_t, ndim=2] coords,
                free(field_vals)
    return arc_length, plot_values

-
+# intended for use in ParticleImageBuffer
@cython.boundscheck(False)
@cython.wraparound(False)
-def rotate_particle_coord(np.float64_t[:] px,
-                          np.float64_t[:] py,
-                          np.float64_t[:] pz,
-                          center,
-                          width,
-                          normal_vector,
-                          north_vector):
+def rotate_particle_coord_pib(np.float64_t[:] px,
+                              np.float64_t[:] py,
+
np.float64_t[:] pz, + center, + width, + normal_vector, + north_vector): # We want to do two rotations, one to first rotate our coordinates to have # the normal vector be the z-axis (i.e., the viewer's perspective), and then # another rotation to make the north-vector be the y-axis (i.e., north). @@ -1909,6 +2003,89 @@ def rotate_particle_coord(np.float64_t[:] px, return px_rotated, py_rotated, rot_bounds_x0, rot_bounds_x1, rot_bounds_y0, rot_bounds_y1 +# version intended for SPH off-axis slices/projections +# includes dealing with periodic boundaries, but also +# shifts particles so center -> origin. +# therefore, don't want to use this in the ParticleImageBuffer, +# which expects differently centered coordinates. +@cython.boundscheck(False) +@cython.wraparound(False) +def rotate_particle_coord(np.float64_t[:] px, + np.float64_t[:] py, + np.float64_t[:] pz, + center, + bounds, + periodic, + width, + depth, + normal_vector, + north_vector): + # We want to do two rotations, one to first rotate our coordinates to have + # the normal vector be the z-axis (i.e., the viewer's perspective), and then + # another rotation to make the north-vector be the y-axis (i.e., north). + # Fortunately, total_rotation_matrix = rotation_matrix_1 x rotation_matrix_2 + cdef np.int64_t num_particles = np.size(px) + cdef np.float64_t[:] z_axis = np.array([0., 0., 1.], dtype="float64") + cdef np.float64_t[:] y_axis = np.array([0., 1., 0.], dtype="float64") + cdef np.float64_t[:, :] normal_rotation_matrix + cdef np.float64_t[:] transformed_north_vector + cdef np.float64_t[:, :] north_rotation_matrix + cdef np.float64_t[:, :] rotation_matrix + + normal_rotation_matrix = get_rotation_matrix(normal_vector, z_axis) + transformed_north_vector = np.matmul(normal_rotation_matrix, north_vector) + north_rotation_matrix = get_rotation_matrix(transformed_north_vector, y_axis) + rotation_matrix = np.matmul(north_rotation_matrix, normal_rotation_matrix) + + cdef np.float64_t[:] px_rotated = np.empty(num_particles, dtype="float64") + cdef np.float64_t[:] py_rotated = np.empty(num_particles, dtype="float64") + cdef np.float64_t[:] pz_rotated = np.empty(num_particles, dtype="float64") + cdef np.float64_t[:] coordinate_matrix = np.empty(3, dtype="float64") + cdef np.float64_t[:] rotated_coordinates + cdef np.float64_t[:] rotated_center + cdef np.int64_t i + cdef int ax + #rotated_center = rotation_matmul( + # rotation_matrix, np.array([center[0], center[1], center[2]])) + rotated_center = np.zeros((3,), dtype=center.dtype) + # set up the rotated bounds + cdef np.float64_t rot_bounds_x0 = rotated_center[0] - 0.5 * width[0] + cdef np.float64_t rot_bounds_x1 = rotated_center[0] + 0.5 * width[0] + cdef np.float64_t rot_bounds_y0 = rotated_center[1] - 0.5 * width[1] + cdef np.float64_t rot_bounds_y1 = rotated_center[1] + 0.5 * width[1] + cdef np.float64_t rot_bounds_z0 = rotated_center[2] - 0.5 * depth + cdef np.float64_t rot_bounds_z1 = rotated_center[2] + 0.5 * depth + for i in range(num_particles): + coordinate_matrix[0] = px[i] + coordinate_matrix[1] = py[i] + coordinate_matrix[2] = pz[i] + + # centering: + # make sure this also works for centers close to periodic edges + # added consequence: the center is placed at the origin + # (might as well keep it there in these temporary coordinates) + for ax in range(3): + # assumed center is zero even if non-periodic + coordinate_matrix[ax] -= center[ax] + if not periodic[ax]: continue + period = bounds[2 * ax + 1] - bounds[2 * ax] + # abs. 
difference between points in the volume is <= period + if coordinate_matrix[ax] < -0.5 * period: + coordinate_matrix[ax] += period + if coordinate_matrix[ax] > 0.5 * period: + coordinate_matrix[ax] -= period + + rotated_coordinates = rotation_matmul( + rotation_matrix, coordinate_matrix) + px_rotated[i] = rotated_coordinates[0] + py_rotated[i] = rotated_coordinates[1] + pz_rotated[i] = rotated_coordinates[2] + + return (px_rotated, py_rotated, pz_rotated, + rot_bounds_x0, rot_bounds_x1, + rot_bounds_y0, rot_bounds_y1, + rot_bounds_z0, rot_bounds_z1) + @cython.boundscheck(False) @cython.wraparound(False) @@ -1921,33 +2098,95 @@ def off_axis_projection_SPH(np.float64_t[:] px, bounds, center, width, + periodic, np.float64_t[:] quantity_to_smooth, np.float64_t[:, :] projection_array, np.uint8_t[:, :] mask, normal_vector, north_vector, - weight_field=None): + weight_field=None, + depth=None, + kernel_name="cubic"): + # periodic: periodicity of the data set: # Do nothing in event of a 0 normal vector if np.allclose(normal_vector, 0.): return - - px_rotated, py_rotated, \ + if depth is None: + # set to volume diagonal + margin -> won't exclude anything + depth = 2. * np.sqrt((bounds[1] - bounds[0])**2 + + (bounds[3] - bounds[2])**2 + + (bounds[5] - bounds[4])**2) + px_rotated, py_rotated, pz_rotated, \ rot_bounds_x0, rot_bounds_x1, \ - rot_bounds_y0, rot_bounds_y1 = rotate_particle_coord(px, py, pz, - center, width, normal_vector, north_vector) - + rot_bounds_y0, rot_bounds_y1, \ + rot_bounds_z0, rot_bounds_z1 = rotate_particle_coord(px, py, pz, + center, bounds, + periodic, + width, depth, + normal_vector, + north_vector) + # check_period=0: assumed to be a small region compared to the box + # size. The rotation already ensures that a center close to a + # periodic edge works out fine. 
+    # Since the simple single-coordinate modulo arithmetic
+    # does not apply to the *rotated* coordinates, the periodicity
+    # approach implemented for the along-axis projection method
+    # would fail here
+    check_period = np.array([0, 0, 0], dtype="int")
    pixelize_sph_kernel_projection(projection_array,
                                   mask,
                                   px_rotated,
                                   py_rotated,
+                                   pz_rotated,
                                   smoothing_lengths,
                                   particle_masses,
                                   particle_densities,
                                   quantity_to_smooth,
                                   [rot_bounds_x0, rot_bounds_x1,
-                                    rot_bounds_y0, rot_bounds_y1],
+                                    rot_bounds_y0, rot_bounds_y1,
+                                    rot_bounds_z0, rot_bounds_z1],
                                   weight_field=weight_field,
-                                   check_period=0)
+                                   _check_period=check_period,
+                                   kernel_name=kernel_name)
+
+# like slice pixelization, but for off-axis planes
+def pixelize_sph_kernel_cutting(
+        np.float64_t[:, :] buff,
+        np.uint8_t[:, :] mask,
+        np.float64_t[:] posx, np.float64_t[:] posy,
+        np.float64_t[:] posz,
+        np.float64_t[:] hsml, np.float64_t[:] pmass,
+        np.float64_t[:] pdens,
+        np.float64_t[:] quantity_to_smooth,
+        center, widthxy,
+        normal_vector, north_vector,
+        boxbounds, periodic,
+        kernel_name="cubic",
+        int check_period=1):
+
+    if check_period == 0:
+        periodic = np.zeros(3, dtype=bool)
+
+    posx_rot, posy_rot, posz_rot, \
+        rot_bounds_x0, rot_bounds_x1, \
+        rot_bounds_y0, rot_bounds_y1, \
+        rot_bounds_z0, _ = rotate_particle_coord(posx, posy, posz,
+                                                 center, boxbounds,
+                                                 periodic,
+                                                 widthxy, 0.,
+                                                 normal_vector,
+                                                 north_vector)
+    bounds_rot = np.array([rot_bounds_x0, rot_bounds_x1,
+                           rot_bounds_y0, rot_bounds_y1])
+    slicez_rot = rot_bounds_z0
+    pixelize_sph_kernel_slice(buff, mask,
+                              posx_rot, posy_rot, posz_rot,
+                              hsml, pmass, pdens, quantity_to_smooth,
+                              bounds_rot, slicez_rot,
+                              kernel_name=kernel_name,
+                              _check_period=np.zeros(3, dtype="int"),
+                              period=None)
+

@cython.boundscheck(False)
diff --git a/yt/utilities/lib/platform_dep.h b/yt/utilities/lib/platform_dep.h
index a0227d60be..d2157040d5 100644
--- a/yt/utilities/lib/platform_dep.h
+++ b/yt/utilities/lib/platform_dep.h
@@ -1,59 +1,14 @@
#include
#ifdef MS_WIN32
#include "malloc.h"
+/*
+note: the following implicitly sets a minimum VS version: a conservative
+minimum is _MSC_VER >= 1928 (VS 2019, 16.8); it may work for VS 2015,
+but that has not been tested. see https://github.com/yt-project/yt/pull/4980
+and https://learn.microsoft.com/en-us/cpp/overview/visual-cpp-language-conformance
+*/
#include
-typedef int int32_t;
-typedef long long int64_t;
-/* Taken from http://siliconandlithium.blogspot.com/2014/05/msvc-c99-mathh-header.html */
-#define isnormal(x) ((_fpclass(x) == _FPCLASS_NN) || (_fpclass(x) == _FPCLASS_PN))
-static __inline double rint(double x){
-    const double two_to_52 = 4.5035996273704960e+15;
-    double fa = fabs(x);
-    if(fa >= two_to_52){
-        return x;
-    } else{
-        return copysign(two_to_52 + fa - two_to_52, x);
-    }
-}
-#if _MSC_VER < 1928
-static __inline long int lrint(double x){
-    return (long)rint(x);
-}
-#endif
-static __inline double fmax(double x, double y){
-    return (x > y) ? x : y;
-}
-static __inline double fmin(double x, double y){
-    return (x < y) ?
x : y; -} - -/* adapted from http://www.johndcook.com/blog/cpp_erf/ - code is under public domain license */ - -double erf(double x) -{ - /* constants */ - double a1 = 0.254829592; - double a2 = -0.284496736; - double a3 = 1.421413741; - double a4 = -1.453152027; - double a5 = 1.061405429; - double p = 0.3275911; - double t; - double y; - - /* Save the sign of x */ - int sign = 1; - if (x < 0) - sign = -1; - x = fabs(x); - - /* A&S formula 7.1.26 */ - t = 1.0/(1.0 + p*x); - y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*exp(-x*x); - - return sign*y; -} +#include #elif defined(__FreeBSD__) #include #include diff --git a/yt/utilities/lib/quad_tree.pyx b/yt/utilities/lib/quad_tree.pyx index c08f4e9ca9..36d4f236b3 100644 --- a/yt/utilities/lib/quad_tree.pyx +++ b/yt/utilities/lib/quad_tree.pyx @@ -413,8 +413,6 @@ cdef class QuadTree: np.float64_t wtoadd, np.int64_t level): cdef int i, j, n - cdef np.float64_t *vorig - vorig = malloc(sizeof(np.float64_t) * self.nvals) if node.children[0][0] == NULL: if self.merged == -1: for i in range(self.nvals): @@ -432,6 +430,8 @@ cdef class QuadTree: iy[curpos] = node.pos[1] return 1 cdef np.int64_t added = 0 + cdef np.float64_t *vorig + vorig = malloc(sizeof(np.float64_t) * self.nvals) if self.merged == 1: for i in range(self.nvals): vorig[i] = vtoadd[i] diff --git a/yt/utilities/lib/tests/test_fill_region.py b/yt/utilities/lib/tests/test_fill_region.py index 28e04c3e2c..967987c0f8 100644 --- a/yt/utilities/lib/tests/test_fill_region.py +++ b/yt/utilities/lib/tests/test_fill_region.py @@ -1,8 +1,13 @@ +import sys + import numpy as np from numpy.testing import assert_equal from yt.utilities.lib.misc_utilities import fill_region +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + NDIM = 32 @@ -38,5 +43,5 @@ def test_fill_region(): np.array([2, 2, 2], dtype="i8"), ) for r in range(level + 1): - for o, i in zip(output_fields, v): + for o, i in zip(output_fields, v, strict=True): assert_equal(o[r::rf, r::rf, r::rf], i) diff --git a/yt/utilities/on_demand_imports.py b/yt/utilities/on_demand_imports.py index e77a482e34..839617f4a5 100644 --- a/yt/utilities/on_demand_imports.py +++ b/yt/utilities/on_demand_imports.py @@ -180,8 +180,8 @@ def __init__(self, pkg_name, exc: Optional[BaseException] = None): # relatively short. Discussion related to this is in # yt-project/yt#1966 self.error = ImportError( - "This functionality requires the %s " - "package to be installed." % self.pkg_name + f"This functionality requires the {self.pkg_name} " + "package to be installed." 
) else: self.error = ImportError( diff --git a/yt/utilities/particle_generator.py b/yt/utilities/particle_generator.py index c41bd68a95..a0957e9b38 100644 --- a/yt/utilities/particle_generator.py +++ b/yt/utilities/particle_generator.py @@ -36,7 +36,7 @@ def __init__(self, ds, num_particles, field_list, ptype="io"): except Exception as e: raise KeyError( "You must specify position fields: " - + " ".join("particle_position_%s" % ax for ax in "xyz") + + " ".join(f"particle_position_{ax}" for ax in "xyz") ) from e self.index_index = self.field_list.index((ptype, "particle_index")) diff --git a/yt/utilities/sdf.py b/yt/utilities/sdf.py index 2ee44c1716..47083281f8 100644 --- a/yt/utilities/sdf.py +++ b/yt/utilities/sdf.py @@ -1,4 +1,5 @@ import os +import sys from collections import UserDict from io import StringIO @@ -6,6 +7,9 @@ from yt.funcs import mylog +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + def get_thingking_deps(): try: @@ -1175,7 +1179,7 @@ def get_key_data(self, key, fields): def iter_slice_data(self, slice_dim, slice_index, fields): mask, offsets, lengths = self.get_slice_chunks(slice_dim, slice_index) - for off, l in zip(offsets, lengths): + for off, l in zip(offsets, lengths, strict=True): data = {} chunk = slice(off, off + l) for field in fields: diff --git a/yt/utilities/tests/test_chemical_formulas.py b/yt/utilities/tests/test_chemical_formulas.py index 1d2fa54afd..7b5b933063 100644 --- a/yt/utilities/tests/test_chemical_formulas.py +++ b/yt/utilities/tests/test_chemical_formulas.py @@ -1,8 +1,13 @@ +import sys + from numpy.testing import assert_allclose, assert_equal from yt.utilities.chemical_formulas import ChemicalFormula, compute_mu from yt.utilities.periodic_table import periodic_table +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + _molecules = ( ("H2O_p1", (("H", 2), ("O", 1)), 1), ("H2O_m1", (("H", 2), ("O", 1)), -1), @@ -19,7 +24,7 @@ def test_formulas(): w = sum(n * periodic_table[e].weight for e, n in components) assert_equal(f.charge, charge) assert_equal(f.weight, w) - for (n, c1), (e, c2) in zip(components, f.elements): + for (n, c1), (e, c2) in zip(components, f.elements, strict=True): assert_equal(n, e.symbol) assert_equal(c1, c2) diff --git a/yt/utilities/tests/test_interpolators.py b/yt/utilities/tests/test_interpolators.py index e55796e806..aa2fff5466 100644 --- a/yt/utilities/tests/test_interpolators.py +++ b/yt/utilities/tests/test_interpolators.py @@ -1,3 +1,5 @@ +import sys + import numpy as np from numpy.testing import assert_array_almost_equal, assert_array_equal @@ -5,6 +7,9 @@ from yt.testing import fake_random_ds from yt.utilities.lib.interpolators import ghost_zone_interpolate +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + def test_linear_interpolator_1d(): random_data = np.random.random(64) @@ -26,7 +31,7 @@ def test_linear_interpolator_1d(): def test_linear_interpolator_2d(): random_data = np.random.random((64, 64)) # evenly spaced bins - fv = dict(zip("xyz", np.mgrid[0.0:1.0:64j, 0.0:1.0:64j])) + fv = dict(zip("xy", np.mgrid[0.0:1.0:64j, 0.0:1.0:64j], strict=True)) bfi = lin.BilinearFieldInterpolator(random_data, (0.0, 1.0, 0.0, 1.0), "xy", True) assert_array_equal(bfi(fv), random_data) @@ -45,7 +50,7 @@ def test_linear_interpolator_2d(): def test_linear_interpolator_3d(): random_data = np.random.random((64, 64, 64)) # evenly spaced bins - fv = dict(zip("xyz", np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j])) + fv = dict(zip("xyz", 
np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j], strict=True)) tfi = lin.TrilinearFieldInterpolator( random_data, (0.0, 1.0, 0.0, 1.0, 0.0, 1.0), "xyz", True ) @@ -70,7 +75,13 @@ def test_linear_interpolator_3d(): def test_linear_interpolator_4d(): random_data = np.random.random((64, 64, 64, 64)) # evenly spaced bins - fv = dict(zip("xyzw", np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j])) + fv = dict( + zip( + "xyzw", + np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j], + strict=True, + ) + ) tfi = lin.QuadrilinearFieldInterpolator( random_data, (0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0), "xyzw", True ) diff --git a/yt/visualization/_commons.py b/yt/visualization/_commons.py index 49fdc33c15..3deaa5bfc8 100644 --- a/yt/visualization/_commons.py +++ b/yt/visualization/_commons.py @@ -10,9 +10,7 @@ from yt.config import ytcfg -if sys.version_info >= (3, 10): - pass -else: +if sys.version_info < (3, 10): from yt._maintenance.backports import zip if TYPE_CHECKING: @@ -258,8 +256,8 @@ def get_default_from_config(data_source, *, field, keys, defaults): def _get_units_label(units: str) -> str: if r"\frac" in units: - return r"$\ \ \left(%s\right)$" % units + return rf"$\ \ \left({units}\right)$" elif units: - return r"$\ \ (%s)$" % units + return rf"$\ \ ({units})$" else: return "" diff --git a/yt/visualization/_handlers.py b/yt/visualization/_handlers.py index 659b1eb6d3..fd4b381a76 100644 --- a/yt/visualization/_handlers.py +++ b/yt/visualization/_handlers.py @@ -431,7 +431,7 @@ def __init__( self._draw_minorticks = draw_minorticks self._cmap: Optional[Colormap] = None self._set_cmap(cmap) - self._background_color: Optional["ColorType"] = background_color + self._background_color: Optional[ColorType] = background_color @property def draw_cbar(self) -> bool: diff --git a/yt/visualization/base_plot_types.py b/yt/visualization/base_plot_types.py index 263a563f69..94792c32d3 100644 --- a/yt/visualization/base_plot_types.py +++ b/yt/visualization/base_plot_types.py @@ -247,7 +247,7 @@ def __init__( ): """Initialize ImagePlotMPL class object""" - self._transform: Optional["Transform"] + self._transform: Optional[Transform] setdefaultattr(self, "_transform", None) self.colorbar_handler = colorbar_handler @@ -332,7 +332,7 @@ def _init_image(self, data, extent, aspect, *, alpha: AlphaT = None): self._set_axes() def _set_axes(self) -> None: - fmt_kwargs: "FormatKwargs" = { + fmt_kwargs: FormatKwargs = { "style": "scientific", "scilimits": (-2, 3), "useMathText": True, @@ -341,9 +341,22 @@ def _set_axes(self) -> None: self.image.axes.set_facecolor(self.colorbar_handler.background_color) self.cax.tick_params(which="both", direction="in") - self.cb = self.figure.colorbar(self.image, self.cax) - cb_axis: "Axis" + # For creating a multipanel plot by ImageGrid + # we may need the location keyword, which requires Matplotlib >= 3.7.0 + cb_location = getattr(self.cax, "orientation", None) + if matplotlib.__version_info__ >= (3, 7): + self.cb = self.figure.colorbar(self.image, self.cax, location=cb_location) + else: + if cb_location in ["top", "bottom"]: + warnings.warn( + "Cannot properly set the orientation of colorbar. 
" + "Consider upgrading matplotlib to version 3.7 or newer", + stacklevel=6, + ) + self.cb = self.figure.colorbar(self.image, self.cax) + + cb_axis: Axis if self.cb.orientation == "vertical": cb_axis = self.cb.ax.yaxis else: @@ -526,9 +539,12 @@ def _toggle_colorbar(self, choice: bool): def _get_labels(self): labels = super()._get_labels() - cbax = self.cb.ax - labels += cbax.yaxis.get_ticklabels() - labels += [cbax.yaxis.label, cbax.yaxis.get_offset_text()] + if getattr(self.cb, "orientation", "vertical") == "horizontal": + cbaxis = self.cb.ax.xaxis + else: + cbaxis = self.cb.ax.yaxis + labels += cbaxis.get_ticklabels() + labels += [cbaxis.label, cbaxis.get_offset_text()] return labels def hide_axes(self, *, draw_frame=None): diff --git a/yt/visualization/fits_image.py b/yt/visualization/fits_image.py index 85a098fa30..8742674e4a 100644 --- a/yt/visualization/fits_image.py +++ b/yt/visualization/fits_image.py @@ -25,6 +25,9 @@ ) from yt.visualization.volume_rendering.off_axis_projection import off_axis_projection +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + class UnitfulHDU: def __init__(self, hdu): @@ -250,7 +253,7 @@ def __init__( self.fields[i] = f"{ftype}_{fname}" for is_first, _is_last, (i, (name, field)) in mark_ends( - enumerate(zip(self.fields, fields)) + enumerate(zip(self.fields, fields, strict=True)) ): if name not in exclude_fields: this_img = img_data[field] @@ -345,10 +348,13 @@ def __init__( width = [width] * self.dimensionality if isinstance(width[0], YTQuantity): cdelt = [ - wh.to_value(wcs_unit) / n for wh, n in zip(width, self.shape) + wh.to_value(wcs_unit) / n + for wh, n in zip(width, self.shape, strict=True) ] else: - cdelt = [float(wh) / n for wh, n in zip(width, self.shape)] + cdelt = [ + float(wh) / n for wh, n in zip(width, self.shape, strict=True) + ] center = img_ctr[: self.dimensionality] w.wcs.crpix = 0.5 * (np.array(self.shape) + 1) w.wcs.crval = center @@ -390,7 +396,7 @@ def _set_units(self, ds, base_units): "magnetic_unit", ) cgs_units = ("cm", "g", "s", "cm/s", "gauss") - for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units): + for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units, strict=True): if unit is None: if ds is not None: u = getattr(ds, attr, None) @@ -1648,6 +1654,7 @@ def __init__( north_vector=north_vector, method=method, weight=weight_field, + depth=depth, ).swapaxes(0, 1) if moment == 2: @@ -1675,6 +1682,7 @@ def _sq_field(field, data, item: FieldKey): north_vector=north_vector, method=method, weight=weight_field, + depth=depth, ).swapaxes(0, 1) buf[key] = compute_stddev_image(buff2, buf[key]) diff --git a/yt/visualization/fixed_resolution.py b/yt/visualization/fixed_resolution.py index 05fdae3db8..272bd765c8 100644 --- a/yt/visualization/fixed_resolution.py +++ b/yt/visualization/fixed_resolution.py @@ -17,7 +17,7 @@ ) from yt.utilities.lib.pixelization_routines import ( pixelize_cylinder, - rotate_particle_coord, + rotate_particle_coord_pib, ) from yt.utilities.math_utils import compute_stddev_image from yt.utilities.on_demand_imports import _h5py as h5py @@ -134,7 +134,7 @@ def __init__( # the filter methods for the present class are defined only when # fixed_resolution_filters is imported, so we need to guarantee # that it happens no later than instantiation - from yt.visualization.fixed_resolution_filters import ( + from yt.visualization.fixed_resolution_filters import ( # noqa FixedResolutionBufferFilter, ) @@ -630,6 +630,8 @@ def _generate_image_and_mask(self, item) -> None: 
self.buff_size[1], ) dd = self.data_source + # only need the first two for SPH, + # but need the third one for other data formats. width = self.ds.arr( ( self.bounds[1] - self.bounds[0], @@ -637,6 +639,7 @@ def _generate_image_and_mask(self, item) -> None: self.bounds[5] - self.bounds[4], ) ) + depth = dd.depth[0] if dd.depth is not None else None buff = off_axis_projection( dd.dd, dd.center, @@ -649,6 +652,7 @@ def _generate_image_and_mask(self, item) -> None: no_ghost=dd.no_ghost, interpolated=dd.interpolated, north_vector=dd.north_vector, + depth=depth, method=dd.method, ) if self.data_source.moment == 2: @@ -679,6 +683,7 @@ def _sq_field(field, data, item: FieldKey): no_ghost=dd.no_ghost, interpolated=dd.interpolated, north_vector=dd.north_vector, + depth=dd.depth, method=dd.method, ) buff = compute_stddev_image(buff2, buff) @@ -745,7 +750,7 @@ def _generate_image_and_mask(self, item) -> None: if hasattr(w, "to_value"): w = w.to_value("code_length") wd.append(w) - x_data, y_data, *bounds = rotate_particle_coord( + x_data, y_data, *bounds = rotate_particle_coord_pib( dd[ftype, "particle_position_x"].to_value("code_length"), dd[ftype, "particle_position_y"].to_value("code_length"), dd[ftype, "particle_position_z"].to_value("code_length"), @@ -825,7 +830,7 @@ def _generate_image_and_mask(self, item) -> None: norm = self.ds.quan(dpx * dpy, "code_length**2").in_base() buff /= norm.v units = data.units / norm.units - info["label"] = "%s $\\rm{Density}$" % info["label"] + info["label"] += " $\\rm{Density}$" else: units = data.units diff --git a/yt/visualization/image_writer.py b/yt/visualization/image_writer.py index e9326d23cc..1dcd7490fe 100644 --- a/yt/visualization/image_writer.py +++ b/yt/visualization/image_writer.py @@ -1,7 +1,6 @@ -import builtins - import numpy as np +from yt._maintenance.ipython_compat import IS_IPYTHON from yt.config import ytcfg from yt.funcs import mylog from yt.units.yt_array import YTQuantity @@ -137,7 +136,7 @@ def write_bitmap(bitmap_array, filename, max_val=None, transpose=False): if len(bitmap_array.shape) != 3 or bitmap_array.shape[-1] not in (3, 4): raise RuntimeError( "Expecting image array of shape (N,M,3) or " - "(N,M,4), received %s" % str(bitmap_array.shape) + f"(N,M,4), received {str(bitmap_array.shape)}" ) if bitmap_array.dtype != np.uint8: @@ -418,7 +417,7 @@ def display_in_notebook(image, max_val=None): three channels. """ - if "__IPYTHON__" in dir(builtins): + if IS_IPYTHON: from IPython.core.displaypub import publish_display_data data = write_bitmap(image, None, max_val=max_val) diff --git a/yt/visualization/plot_container.py b/yt/visualization/plot_container.py index 9cdb39df11..44e150a663 100644 --- a/yt/visualization/plot_container.py +++ b/yt/visualization/plot_container.py @@ -1,6 +1,5 @@ import abc import base64 -import builtins import os import warnings from collections import defaultdict @@ -12,6 +11,7 @@ from unyt.dimensions import length from yt._maintenance.deprecation import issue_deprecation_warning +from yt._maintenance.ipython_compat import IS_IPYTHON from yt._typing import FieldKey, Quantity from yt.config import ytcfg from yt.data_objects.time_series import DatasetSeries @@ -634,7 +634,7 @@ def show(self): for v in sorted(self.plots.values()): v.show() else: - if "__IPYTHON__" in dir(builtins): + if IS_IPYTHON: from IPython.display import display display(self) @@ -1033,7 +1033,7 @@ def set_zlim( issue_deprecation_warning( "Passing `zmax=None` explicitly is deprecated. 
" "If you wish to explicitly set zmax to the maximal " - "data value, pass `zmin='max'` instead. " + "data value, pass `zmax='max'` instead. " "Otherwise leave this argument unset.", since="4.1", stacklevel=5, diff --git a/yt/visualization/plot_modifications.py b/yt/visualization/plot_modifications.py index fd90ca03cc..1b4ced9e31 100644 --- a/yt/visualization/plot_modifications.py +++ b/yt/visualization/plot_modifications.py @@ -51,6 +51,8 @@ else: from typing_extensions import TypeGuard + from yt._maintenance.backports import zip + if sys.version_info >= (3, 11): from typing import assert_never else: @@ -483,7 +485,7 @@ def __call__(self, plot) -> "BaseQuiverCallback": ) else: assert_never(geometry) - qcb: "BaseQuiverCallback" + qcb: BaseQuiverCallback if plot._type_name == "CuttingPlane": qcb = CuttingQuiverCallback( (ftype, "cutting_plane_velocity_x"), @@ -609,7 +611,7 @@ def __call__(self, plot) -> "BaseQuiverCallback": ftype = plot.data._current_fluid_type # Instantiation of these is cheap geometry: Geometry = plot.data.ds.geometry - qcb: "BaseQuiverCallback" + qcb: BaseQuiverCallback if plot._type_name == "CuttingPlane": if geometry is Geometry.CARTESIAN: pass @@ -745,7 +747,7 @@ def __call__(self, plot): # do the transformation. Also check for the exact bounds of the transform # which can cause issues with projections. tform_bnds = plot._transform.x_limits + plot._transform.y_limits - if any(b.d == tb for b, tb in zip(bounds, tform_bnds)): + if any(b.d == tb for b, tb in zip(bounds, tform_bnds, strict=True)): # note: cartopy will also raise its own warning, but it is useful to add this # warning as well since the only way to avoid the exact bounds is to change the # extent of the plot. @@ -1167,7 +1169,7 @@ def __call__(self, plot): GRE = GRE[new_indices] block_ids = np.array(block_ids)[new_indices] - for px_off, py_off in zip(pxs.ravel(), pys.ravel()): + for px_off, py_off in zip(pxs.ravel(), pys.ravel(), strict=True): pxo = px_off * DW[px_index] pyo = py_off * DW[py_index] left_edge_x = np.array((GLE[:, px_index] + pxo - x0) * dx) + xx0 @@ -1244,9 +1246,9 @@ def __call__(self, plot): y[i] = right_edge_y[n] - (12 * (yy1 - yy0) / ypix) else: raise RuntimeError( - "Unrecognized id_loc value ('%s'). " + f"Unrecognized id_loc value ({self.id_loc!r}). " "Allowed values are 'lower left', lower right', " - "'upper left', and 'upper right'." % self.id_loc + "'upper left', and 'upper right'." 
) xi, yi = self._sanitize_xy_order(plot, x[i], y[i]) plot._axes.text(xi, yi, "%d" % block_ids[n], clip_on=True) diff --git a/yt/visualization/plot_window.py b/yt/visualization/plot_window.py index bb6cae9c9d..4a52e803f8 100644 --- a/yt/visualization/plot_window.py +++ b/yt/visualization/plot_window.py @@ -2,7 +2,7 @@ import sys from collections import defaultdict from numbers import Number -from typing import Optional, Union +from typing import TYPE_CHECKING, Optional, Union import matplotlib import numpy as np @@ -12,6 +12,8 @@ from yt._maintenance.deprecation import issue_deprecation_warning from yt._typing import AlphaT from yt.data_objects.image_array import ImageArray +from yt.frontends.sph.data_structures import ParticleDataset +from yt.frontends.stream.data_structures import StreamParticlesDataset from yt.frontends.ytdata.data_structures import YTSpatialPlotDataset from yt.funcs import ( fix_axis, @@ -20,6 +22,7 @@ iter_fields, mylog, obj_length, + parse_center_array, validate_moment, ) from yt.geometry.api import Geometry @@ -55,9 +58,10 @@ invalidate_plot, ) -if sys.version_info >= (3, 10): - pass -else: +if TYPE_CHECKING: + from yt.visualization.plot_modifications import PlotCallback + +if sys.version_info < (3, 10): from yt._maintenance.backports import zip if sys.version_info >= (3, 11): @@ -80,20 +84,33 @@ def get_window_parameters(axis, center, width, ds): return (bounds, center, display_center) -def get_oblique_window_parameters(normal, center, width, ds, depth=None): +def get_oblique_window_parameters( + normal, center, width, ds, depth=None, get3bounds=False +): center, display_center = ds.coordinates.sanitize_center(center, axis=None) width = ds.coordinates.sanitize_width(normal, width, depth) if len(width) == 2: # Transforming to the cutting plane coordinate system - center = (center - ds.domain_left_edge) / ds.domain_width - 0.5 + # the original dimensionless center messes up off-axis + # SPH projections though -> don't use this center there + center = ( + (center - ds.domain_left_edge) / ds.domain_width - 0.5 + ) * ds.domain_width (normal, perp1, perp2) = ortho_find(normal) mat = np.transpose(np.column_stack((perp1, perp2, normal))) center = np.dot(mat, center) w = tuple(el.in_units("code_length") for el in width) bounds = tuple(((2 * (i % 2)) - 1) * w[i // 2] / 2 for i in range(len(w) * 2)) - + if get3bounds and depth is None: + # off-axis projection, depth not specified + # -> set 'large enough' depth using half the box diagonal + margin + d2 = ds.domain_width[0].in_units("code_length") ** 2 + d2 += ds.domain_width[1].in_units("code_length") ** 2 + d2 += ds.domain_width[2].in_units("code_length") ** 2 + diag = np.sqrt(d2) + bounds = bounds + (-0.51 * diag, 0.51 * diag) return (bounds, center) @@ -868,7 +885,6 @@ def __init__(self, *args, **kwargs) -> None: # the filter methods for the present class are defined only when # fixed_resolution_filters is imported, so we need to guarantee # that it happens no later than instantiation - from yt.visualization.plot_modifications import PlotCallback self._callbacks: list[PlotCallback] = [] @@ -1195,9 +1211,9 @@ def _setup_plots(self): if colorbar_label is None: colorbar_label = image.info["label"] if getattr(self, "moment", 1) == 2: - colorbar_label = "%s \\rm{Standard Deviation}" % colorbar_label + colorbar_label = f"{colorbar_label} \\rm{{Standard Deviation}}" if hasattr(self, "projected"): - colorbar_label = "$\\rm{Projected }$ %s" % colorbar_label + colorbar_label = f"$\\rm{{Projected }}$ {colorbar_label}" if units is not 
None and units != "":
                colorbar_label += _get_units_label(units)
@@ -2218,7 +2237,9 @@ def __init__(
                f"off-axis slices are not supported for {ds.geometry!r} geometry\n"
                f"currently supported geometries: {self._supported_geometries!r}"
            )
-
+        # bounds are in cutting-plane coordinates, centered on 0:
+        # [xmin, xmax, ymin, ymax]; width/height can be derived back
+        # from these. The unit is code_length.
        (bounds, center_rot) = get_oblique_window_parameters(normal, center, width, ds)
        if field_parameters is None:
            field_parameters = {}
@@ -2273,6 +2294,7 @@ def __init__(
        le=None,
        re=None,
        north_vector=None,
+        depth=None,
        method="integrate",
        data_source=None,
        *,
@@ -2284,6 +2306,7 @@
        self.axis = None  # always true for oblique data objects
        self.normal_vector = normal_vector
        self.width = width
+        self.depth = depth
        if data_source is None:
            self.dd = ds.all_data()
        else:
@@ -2421,7 +2444,7 @@ def __init__(
        fields,
        center="center",
        width=None,
-        depth=(1, "1"),
+        depth=None,
        axes_unit=None,
        weight_field=None,
        max_level=None,
@@ -2439,22 +2462,54 @@
    ):
        if ds.geometry not in self._supported_geometries:
            raise NotImplementedError(
-                f"off-axis slices are not supported for {ds.geometry!r} geometry\n"
-                f"currently supported geometries: {self._supported_geometries!r}"
+                "off-axis projections are not supported"
+                f" for {ds.geometry!r} geometry\n"
+                "currently supported geometries:"
+                f" {self._supported_geometries!r}"
            )
-
+        # center_rot normalizes the center to (0, 0); its units match
+        # the bounds.
+        # For SPH data, we want to pass in the original center: the
+        # cython backend handles centering on that point and the
+        # rotation.
+        # get3bounds gets a depth of 0.5 * diagonal + margin in the
+        # depth=None case.
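+        # (for illustration: with a unit-cube domain the fallback is
+        # bounds + (-0.51 * sqrt(3), +0.51 * sqrt(3)), i.e. a total
+        # depth of about 1.77 code_length, so nothing along the line
+        # of sight can be cut off; numbers are illustrative only)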
        (bounds, center_rot) = get_oblique_window_parameters(
-            normal, center, width, ds, depth=depth
+            normal,
+            center,
+            width,
+            ds,
+            depth=depth,
+            get3bounds=True,
        )
+        # will probably fail if you try to project an SPH and non-SPH
+        # field in a single call
+        # checks for SPH fields copied from the
+        # _ortho_pixelize method in cartesian_coordinates.py
+
+        ## data_source might be None here
+        ## (OffAxisProjectionDummyDataSource gets used later)
+        if data_source is None:
+            data_source = ds.all_data()
+        field = data_source._determine_fields(fields)[0]
+        finfo = data_source.ds.field_info[field]
+        is_sph_field = finfo.is_sph_field
+        particle_datasets = (ParticleDataset, StreamParticlesDataset)
+
+        if isinstance(data_source.ds, particle_datasets) and is_sph_field:
+            center_use = parse_center_array(center, ds=data_source.ds, axis=None)
+        else:
+            center_use = center_rot
        fields = list(iter_fields(fields))[:]
-        oap_width = ds.arr(
-            (bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4])
-        )
        OffAxisProj = OffAxisProjectionDummyDataSource(
-            center_rot,
+            center_use,
            ds,
            normal,
-            oap_width,
+            width,
            fields,
            interpolated,
            weight=weight_field,
@@ -2463,6 +2518,7 @@
            le=le,
            re=re,
            north_vector=north_vector,
+            depth=depth,
            method=method,
            data_source=data_source,
            moment=moment,
diff --git a/yt/visualization/profile_plotter.py b/yt/visualization/profile_plotter.py
index 004352cf90..6f354ee529 100644
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -1,14 +1,13 @@
import base64
-import builtins
import os
-from collections.abc import Iterable
from functools import wraps
-from typing import Any, Optional, Union
+from typing import TYPE_CHECKING, Any, Optional, Union

import matplotlib
import numpy as np
from more_itertools.more import always_iterable, unzip

+from yt._maintenance.ipython_compat import IS_IPYTHON
from yt._typing import FieldKey
from yt.data_objects.profiles import create_profile, sanitize_field_tuple_keys
from yt.data_objects.static_output import Dataset
@@ -28,6 +27,11 @@
    validate_plot,
)

+if TYPE_CHECKING:
+    from collections.abc import Iterable
+
+    from yt._typing import FieldKey
+

def invalidate_profile(f):
    @wraps(f)
@@ -342,7 +346,7 @@ def show(self):
        >>> pp.show()
        """
-        if "__IPYTHON__" in dir(builtins):
+        if IS_IPYTHON:
            from IPython.display import display

            display(self)
diff --git a/yt/visualization/tests/test_image_comp_2D_plots.py b/yt/visualization/tests/test_image_comp_2D_plots.py
index 254c8e5338..371256c0eb 100644
--- a/yt/visualization/tests/test_image_comp_2D_plots.py
+++ b/yt/visualization/tests/test_image_comp_2D_plots.py
@@ -147,6 +147,40 @@ def test_particleprojectionplot_set_colorbar_properties():
    return p.plots[field].figure


+class TestMultipanelPlot:
+    @classmethod
+    def setup_class(cls):
+        cls.fields = [
+            ("gas", "density"),
+            ("gas", "velocity_x"),
+            ("gas", "velocity_y"),
+            ("gas", "velocity_magnitude"),
+        ]
+        cls.ds = fake_random_ds(16)
+
+    @pytest.mark.skipif(
+        mpl.__version_info__ < (3, 7),
+        reason="colorbar cannot currently be set horizontal in multi-panel plot with matplotlib older than 3.7.0",
+    )
+    @pytest.mark.parametrize("cbar_location", ["top", "bottom", "left", "right"])
+    @pytest.mark.mpl_image_compare
+    def test_multipanelplot_colorbar_orientation_simple(self, cbar_location):
+        p = SlicePlot(self.ds, "z", self.fields)
+        return p.export_to_mpl_figure((2, 2), cbar_location=cbar_location)
+
+
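+    # below: with matplotlib older than 3.7, a horizontal ("top"/"bottom")
+    # colorbar placement cannot be honoured, so export_to_mpl_figure is
+    # expected to fall back to a vertical colorbar and emit a UserWarning;
+    # on newer matplotlib the requested location should be applied silently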
+    @pytest.mark.parametrize("cbar_location", ["top", "bottom"])
+    def test_multipanelplot_colorbar_orientation_warning(self, cbar_location):
+        p = SlicePlot(self.ds, "z", self.fields)
+        if mpl.__version_info__ < (3, 7):
+            with pytest.warns(
+                UserWarning,
+                match="Cannot properly set the orientation of colorbar.",
+            ):
+                p.export_to_mpl_figure((2, 2), cbar_location=cbar_location)
+        else:
+            p.export_to_mpl_figure((2, 2), cbar_location=cbar_location)
+
+
class TestProfilePlot:
    @classmethod
    def setup_class(cls):
diff --git a/yt/visualization/tests/test_offaxisprojection.py b/yt/visualization/tests/test_offaxisprojection.py
index d317e31d8b..6c6b8e55eb 100644
--- a/yt/visualization/tests/test_offaxisprojection.py
+++ b/yt/visualization/tests/test_offaxisprojection.py
@@ -1,6 +1,7 @@
import itertools as it
import os
import shutil
+import sys
import tempfile
import unittest
@@ -13,10 +14,16 @@
    fake_octree_ds,
    fake_random_ds,
)
-from yt.visualization.api import OffAxisProjectionPlot, OffAxisSlicePlot
+from yt.visualization.api import (
+    OffAxisProjectionPlot,
+    OffAxisSlicePlot,
+)
from yt.visualization.image_writer import write_projection
from yt.visualization.volume_rendering.api import off_axis_projection

+if sys.version_info < (3, 10):
+    from yt._maintenance.backports import zip
+

# TODO: replace this with pytest.mark.parametrize
def expand_keywords(keywords, full=False):
@@ -85,7 +92,7 @@ def expand_keywords(keywords, full=False):
        keys = sorted(keywords)
        list_of_kwarg_dicts = np.array(
            [
-                dict(zip(keys, prod))
+                dict(zip(keys, prod, strict=True))
                for prod in it.product(*(keywords[key] for key in keys))
            ]
        )
@@ -235,10 +242,34 @@ def _vlos_sq(field, data):
        moment=2,
        buff_size=(400, 400),
    )
-    assert_rel_equal(
-        np.sqrt(
-            p1.frb["gas", "velocity_los_squared"] - p1.frb["gas", "velocity_los"] ** 2
-        ),
-        p2.frb["gas", "velocity_los"],
-        10,
+    ## this failed because some <v**2> - <v>**2 values come out
+    ## marginally < 0, resulting in unmatched NaN values in the
+    ## first assert_rel_equal argument. The compute_stddev_image
+    ## function used in OffAxisProjectionPlot checks for and deals
+    ## with these cases.
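+    ## roughly, the numerically safe recipe implemented below is
+    ##     var = <v**2> - <v>**2
+    ##     var[(var < 0) & (abs(var) < eps)] = 0
+    ##     sigma = sqrt(var)
+    ## (sketch only; 'eps' stands in for the mindiff threshold computed
+    ## below, and the commented-out assertion is kept for reference)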
+    # assert_rel_equal(
+    #     np.sqrt(
+    #         p1.frb["gas", "velocity_los_squared"] - p1.frb["gas", "velocity_los"] ** 2
+    #     ),
+    #     p2.frb["gas", "velocity_los"],
+    #     10,
+    # )
+    p1_expsq = p1.frb["gas", "velocity_los_squared"]
+    p1_sqexp = p1.frb["gas", "velocity_los"] ** 2
+    # set values to zero that have <v**2> - <v>**2 < 0, but where
+    # the absolute values are much smaller than the smallest
+    # positive values of <v**2> and <v>**2
+    # (i.e., the difference is pretty much zero)
+    mindiff = 1e-10 * min(
+        np.min(p1_expsq[p1_expsq > 0]), np.min(p1_sqexp[p1_sqexp > 0])
+    )
+    safeorbad = np.logical_not(
+        np.logical_and(p1_expsq - p1_sqexp < 0, p1_expsq - p1_sqexp > -1.0 * mindiff)
    )
+    # avoid errors from sqrt(negative);
+    # taking sqrt inside zeros_like ensures correct units
+    p1res = np.zeros_like(np.sqrt(p1_expsq))
+    p1res[safeorbad] = np.sqrt(p1_expsq[safeorbad] - p1_sqexp[safeorbad])
+    p2res = p2.frb["gas", "velocity_los"]
+    assert_rel_equal(p1res, p2res, 10)
diff --git a/yt/visualization/tests/test_offaxisprojection_pytestonly.py b/yt/visualization/tests/test_offaxisprojection_pytestonly.py
new file mode 100644
index 0000000000..f63ac924c3
--- /dev/null
+++ b/yt/visualization/tests/test_offaxisprojection_pytestonly.py
@@ -0,0 +1,169 @@
+from typing import Union
+
+import numpy as np
+import pytest
+import unyt
+
+from yt.testing import (
+    assert_rel_equal,
+    cubicspline_python,
+    fake_sph_flexible_grid_ds,
+    integrate_kernel,
+)
+from yt.visualization.api import ProjectionPlot
+
+
+@pytest.mark.parametrize("weighted", [True, False])
+@pytest.mark.parametrize("periodic", [True, False])
+@pytest.mark.parametrize("depth", [None, (1.0, "cm"), (0.5, "cm")])
+@pytest.mark.parametrize("shiftcenter", [False, True])
+@pytest.mark.parametrize("northvector", [None, (1.0e-4, 1.0, 0.0)])
+def test_sph_proj_general_offaxis(
+    northvector: Union[tuple[float, float, float], None],
+    shiftcenter: bool,
+    depth: Union[tuple[float, str], None],
+    periodic: bool,
+    weighted: bool,
+) -> None:
+    """
+    Same as the on-axis projections, but we rotate the basis vectors
+    to test whether rotations are handled OK. The rotation is chosen to
+    be small so that in/exclusion of particles within bboxes, etc.
+    works out the same way.
+    We just send lines of sight through pixel centers for convenience.
+    Particles are at [0.5, 1.5, 2.5] (in each coordinate), with
+    smoothing lengths 0.25; all particles have mass 1. and density 1.5,
+    except the single center particle, with mass 2. and density 3.
+
+    Parameters
+    ----------
+    northvector: tuple
+        y-axis direction in the final plot (direction vector)
+    shiftcenter: bool
+        shift the coordinates to center the projection on.
+        (The grid is offset to this same center)
+    depth: tuple (float, str) or None
+        depth of the projection slice
+    periodic: bool
+        assume periodic boundary conditions, or not
+    weighted: bool
+        make a weighted projection (density-weighted density), or not
+
+    Returns
+    -------
+    None
+    """
+    if shiftcenter:
+        center = np.array((0.625, 0.625, 0.625))  # cm
+    else:
+        center = np.array((1.5, 1.5, 1.5))  # cm
+    bbox = unyt.unyt_array(np.array([[0.0, 3.0], [0.0, 3.0], [0.0, 3.0]]), "cm")
+    hsml_factor = 0.5
+    unitrho = 1.5
+
+    # test correct centering, particle selection
+    def makemasses(i, j, k):
+        if i == j == k == 1:
+            return 2.0
+        else:
+            return 1.0
+
+    # result shouldn't depend explicitly on the center if we re-center
+    # the data, unless we get cut-offs in the non-periodic case
+    # *almost* the z-axis
+    # try to make sure dl differences from periodic wrapping are small
+    epsilon = 1e-4
+    projaxis = np.array([epsilon, 0.00, np.sqrt(1.0 - epsilon**2)])
+    e1dir = projaxis / np.sqrt(np.sum(projaxis**2))
+    # TODO: figure out other (default) axes for basis vectors here
+    if northvector is None:
+        e2dir = np.array([0.0, 1.0, 0.0])
+    else:
+        e2dir = np.asarray(northvector)
+    # Gram-Schmidt: project out the e1 component, then normalize
+    e2dir = e2dir - np.sum(e1dir * e2dir) * e1dir
+    e2dir /= np.sqrt(np.sum(e2dir**2))
+    e3dir = np.cross(e1dir, e2dir)
+
+    ds = fake_sph_flexible_grid_ds(
+        hsml_factor=hsml_factor,
+        nperside=3,
+        periodic=periodic,
+        offsets=np.full(3, 0.5),
+        massgenerator=makemasses,
+        unitrho=unitrho,
+        bbox=bbox.v,
+        recenter=center,
+        e1hat=e1dir,
+        e2hat=e2dir,
+        e3hat=e3dir,
+    )
+
+    source = ds.all_data()
+    # couple to dataset -> right unit registry
+    center = ds.arr(center, "cm")
+
+    # m / rho, factor 1. / hsml**2 is included in the kernel integral
+    # (density is adjusted, so same for center particle)
+    prefactor = 1.0 / unitrho  # / (0.5 * 0.5)**2
+    dl_cen = integrate_kernel(cubicspline_python, 0.0, 0.25)
+
+    if weighted:
+        toweight_field = ("gas", "density")
+    else:
+        toweight_field = None
+    # we don't actually want a plot, it's just a straightforward,
+    # common way to get an frb / image array
+    prj = ProjectionPlot(
+        ds,
+        projaxis,
+        ("gas", "density"),
+        width=(2.5, "cm"),
+        weight_field=toweight_field,
+        buff_size=(5, 5),
+        center=center,
+        data_source=source,
+        north_vector=northvector,
+        depth=depth,
+    )
+    img = prj.frb.data[("gas", "density")]
+    if weighted:
+        # periodic shifts will modify the (relative) dl values a bit
+        expected_out = np.zeros(
+            (
+                5,
+                5,
+            ),
+            dtype=img.v.dtype,
+        )
+        expected_out[::2, ::2] = unitrho
+        if depth is None:
+            expected_out[2, 2] *= 1.5
+        else:
+            # only the 2 * unitrho element is included
+            expected_out[2, 2] *= 2.0
+    else:
+        expected_out = np.zeros(
+            (
+                5,
+                5,
+            ),
+            dtype=img.v.dtype,
+        )
+        expected_out[::2, ::2] = dl_cen * prefactor * unitrho
+        if depth is None:
+            # 3 particles per l.o.s., including the denser one
+            expected_out *= 3.0
+            expected_out[2, 2] *= 4.0 / 3.0
+        else:
+            # 1 particle per l.o.s., including the denser one
+            expected_out[2, 2] *= 2.0
+    # grid is shifted to the left -> 'missing' stuff at the left
+    if (not periodic) and shiftcenter:
+        expected_out[:1, :] = 0.0
+        expected_out[:, :1] = 0.0
+    assert_rel_equal(expected_out, img.v, 4)
diff --git a/yt/visualization/tests/test_particle_plot.py b/yt/visualization/tests/test_particle_plot.py
index 409902a921..3243dd20e8 100644
---
+++ b/yt/visualization/tests/test_particle_plot.py
@@ -1,5 +1,6 @@
 import os
 import shutil
+import sys
 import tempfile
 import unittest
 from unittest import mock
@@ -21,6 +22,9 @@
 from yt.visualization.api import ParticlePhasePlot, ParticlePlot, ParticleProjectionPlot
 from yt.visualization.tests.test_plotwindow import ATTR_ARGS, WIDTH_SPECS
 
+if sys.version_info < (3, 10):
+    from yt._maintenance.backports import zip
+
 
 def setup_module():
     """Test specific setup."""
@@ -431,7 +435,7 @@ def test_particle_plot_offaxis(self):
         test_ds = fake_particle_ds()
         Ls = [[1, 1, 1], [0, 1, -0.5]]
         Ns = [None, [1, 1, 1]]
-        for L, N in zip(Ls, Ns):
+        for L, N in zip(Ls, Ns, strict=True):
            for weight_field in WEIGHT_FIELDS:
                 pplot_off = ParticleProjectionPlot(
                     test_ds,
@@ -456,9 +460,12 @@ def test_creation_with_width(self):
             ylim = [plot.ds.quan(el[0], el[1]) for el in ylim]
             pwidth = [plot.ds.quan(el[0], el[1]) for el in pwidth]
 
-            [assert_array_almost_equal(px, x, 14) for px, x in zip(plot.xlim, xlim)]
-            [assert_array_almost_equal(py, y, 14) for py, y in zip(plot.ylim, ylim)]
-            [assert_array_almost_equal(pw, w, 14) for pw, w in zip(plot.width, pwidth)]
+            for px, x in zip(plot.xlim, xlim, strict=True):
+                assert_array_almost_equal(px, x, 14)
+            for py, y in zip(plot.ylim, ylim, strict=True):
+                assert_array_almost_equal(py, y, 14)
+            for pw, w in zip(plot.width, pwidth, strict=True):
+                assert_array_almost_equal(pw, w, 14)
 
 
 def test_particle_plot_instance():
diff --git a/yt/visualization/tests/test_plotwindow.py b/yt/visualization/tests/test_plotwindow.py
index a9830e1d8c..b229dae8e4 100644
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -1,5 +1,6 @@
 import os
 import shutil
+import sys
 import tempfile
 import unittest
 from collections import OrderedDict
@@ -43,6 +44,9 @@
     plot_2d,
 )
 
+if sys.version_info < (3, 10):
+    from yt._maintenance.backports import zip
+
 
 def setup_module():
     """Test specific setup."""
@@ -386,9 +390,13 @@ def test_creation_with_width(self):
             ylim = [plot.ds.quan(el[0], el[1]) for el in ylim]
             pwidth = [plot.ds.quan(el[0], el[1]) for el in pwidth]
 
-            [assert_array_almost_equal(px, x, 14) for px, x in zip(plot.xlim, xlim)]
-            [assert_array_almost_equal(py, y, 14) for py, y in zip(plot.ylim, ylim)]
-            [assert_array_almost_equal(pw, w, 14) for pw, w in zip(plot.width, pwidth)]
+            for px, x in zip(plot.xlim, xlim, strict=True):
+                assert_array_almost_equal(px, x, 14)
+            for py, y in zip(plot.ylim, ylim, strict=True):
+                assert_array_almost_equal(py, y, 14)
+            for pw, w in zip(plot.width, pwidth, strict=True):
+                assert_array_almost_equal(pw, w, 14)
+
             assert aun == plot._axes_unit_names
diff --git a/yt/visualization/tests/test_save.py b/yt/visualization/tests/test_save.py
index 31ec9e230a..2612e8054d 100644
--- a/yt/visualization/tests/test_save.py
+++ b/yt/visualization/tests/test_save.py
@@ -76,8 +76,8 @@ def test_suffix_clashing(ext, simple_sliceplot, tmp_path):
     target = (tmp_path / "myfile").with_suffix(ext)
     expected_warning = re.compile(
-        r"Received two valid image formats '%s' \(from filename\) "
-        r"and 'png' \(from suffix\)\. The former is ignored\." % ext[1:]
+        rf"Received two valid image formats {ext.removeprefix('.')!r} "
+        r"\(from filename\) and 'png' \(from suffix\)\. The former is ignored\."
     )
 
     with pytest.warns(UserWarning, match=expected_warning):
diff --git a/yt/visualization/volume_rendering/lens.py b/yt/visualization/volume_rendering/lens.py
index 0e19b7e0a1..7335cebe34 100644
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -137,10 +137,11 @@ def project_to_plane(self, camera, pos, res=None):
         return px, py, dz
 
     def __repr__(self):
-        disp = ":\n\tlens_type:plane-parallel\n\tviewpoint:%s" % (
-            self.viewpoint
+        return (
+            ":\n"
+            "\tlens_type:plane-parallel\n"
+            f"\tviewpoint:{self.viewpoint}"
         )
-        return disp
 
 
 class PerspectiveLens(Lens):
diff --git a/yt/visualization/volume_rendering/off_axis_projection.py b/yt/visualization/volume_rendering/off_axis_projection.py
index 392749633e..3c2359a3f8 100644
--- a/yt/visualization/volume_rendering/off_axis_projection.py
+++ b/yt/visualization/volume_rendering/off_axis_projection.py
@@ -30,6 +30,7 @@ def off_axis_projection(
     no_ghost=False,
     interpolated=False,
     north_vector=None,
+    depth=None,
     num_threads=1,
     method="integrate",
 ):
@@ -80,6 +81,10 @@ def off_axis_projection(
     north_vector : optional, array_like, default None
         A vector that, if specified, restricts the orientation such that the
         north vector dotted into the image plane points "up". Useful for rotations
+    depth : optional, float, tuple[float, str], or unyt_array of size 1
+        Specify the depth of the projection region (size along the
+        line of sight). If no units are given (a bare float rather than
+        a unyt_array or (float, str) tuple), code units are assumed.
     num_threads: integer, optional, default 1
         Use this many OpenMP threads during projection.
     method : string
@@ -145,6 +150,16 @@ def off_axis_projection(
         center = data_source.ds.arr(center, "code_length")
     if not hasattr(width, "units"):
         width = data_source.ds.arr(width, "code_length")
+    if depth is not None:
+        # handle intrinsic units or a (value, unit) tuple,
+        # then convert to code length;
+        # a bare float is assumed to be in code units already
+        if isinstance(depth, tuple):
+            depth = data_source.ds.arr(np.array([depth[0]]), depth[1])
+        if hasattr(depth, "units"):
+            depth = depth.to("code_length").d
 
     if hasattr(data_source.ds, "_sph_ptypes"):
         if method != "integrate":
@@ -203,16 +218,29 @@ def off_axis_projection(
     buf = np.zeros((resolution[0], resolution[1]), dtype="float64")
     mask = np.ones_like(buf, dtype="uint8")
-    x_min = center[0] - width[0] / 2
-    x_max = center[0] + width[0] / 2
-    y_min = center[1] - width[1] / 2
-    y_max = center[1] + width[1] / 2
-    z_min = center[2] - width[2] / 2
-    z_max = center[2] + width[2] / 2
+    # the width argument (from fixed_resolution.py) is just the size of
+    # the domain, so the bounds are the domain edges
+    periodic = data_source.ds.periodicity
+    le = data_source.ds.domain_left_edge.to("code_length").d
+    re = data_source.ds.domain_right_edge.to("code_length").d
+    x_min, y_min, z_min = le
+    x_max, y_max, z_max = re
+    bounds = [x_min, x_max, y_min, y_max, z_min, z_max]
+    # only need the (rotated) x/y widths
+    _width = (width.to("code_length").d)[:2]
     finfo = data_source.ds.field_info[item]
     ounits = finfo.output_units
-    bounds = [x_min, x_max, y_min, y_max, z_min, z_max]
-
+    kernel_name = None
+    if hasattr(data_source.ds, "kernel_name"):
+        kernel_name = data_source.ds.kernel_name
+    if kernel_name is None:
+        kernel_name = "cubic"
     if weight is None:
"io"): off_axis_projection_SPH( @@ -224,12 +252,15 @@ def off_axis_projection( chunk[ptype, "smoothing_length"].to("code_length").d, bounds, center.to("code_length").d, - width.to("code_length").d, + _width, + periodic, chunk[item].in_units(ounits), buf, mask, normal_vector, north, + depth=depth, + kernel_name=kernel_name, ) # Assure that the path length unit is in the default length units @@ -262,13 +293,16 @@ def off_axis_projection( chunk[ptype, "smoothing_length"].to("code_length").d, bounds, center.to("code_length").d, - width.to("code_length").d, + _width, + periodic, chunk[item].in_units(ounits), buf, mask, normal_vector, north, weight_field=chunk[weight].in_units(wounits), + depth=depth, + kernel_name=kernel_name, ) for chunk in data_source.chunks([], "io"): @@ -281,12 +315,15 @@ def off_axis_projection( chunk[ptype, "smoothing_length"].to("code_length").d, bounds, center.to("code_length").d, - width.to("code_length").d, + _width, + periodic, chunk[weight].to(wounits), weight_buff, mask, normal_vector, north, + depth=depth, + kernel_name=kernel_name, ) normalization_2d_utility(buf, weight_buff) @@ -300,6 +337,7 @@ def off_axis_projection( "north_vector": north_vector, "normal_vector": normal_vector, "width": width, + "depth": depth, "units": funits, "type": "SPH smoothed projection", } @@ -487,7 +525,8 @@ def temp_weightfield(field, data): image *= dl else: mask = image[:, :, 1] == 0 - image[:, :, 0] /= image[:, :, 1] + nmask = np.logical_not(mask) + image[:, :, 0][nmask] /= image[:, :, 1][nmask] image[mask] = 0 return image[:, :, 0] diff --git a/yt/visualization/volume_rendering/old_camera.py b/yt/visualization/volume_rendering/old_camera.py index cca5ff178a..817ed71ca5 100644 --- a/yt/visualization/volume_rendering/old_camera.py +++ b/yt/visualization/volume_rendering/old_camera.py @@ -1,8 +1,9 @@ -import builtins +import sys from copy import deepcopy import numpy as np +from yt._maintenance.ipython_compat import IS_IPYTHON from yt.config import ytcfg from yt.data_objects.api import ImageArray from yt.funcs import ensure_numpy_array, get_num_threads, get_pbar, is_sequence, mylog @@ -34,6 +35,9 @@ from .transfer_functions import ProjectionTransferFunction +if sys.version_info < (3, 10): + from yt._maintenance.backports import zip + def get_corners(le, re): return np.array( @@ -410,7 +414,7 @@ def draw_coordinate_vectors(self, im, length=0.05, thickness=1): # we flipped it in snapshot to get the orientation correct, so # flip the lines - for vec, color in zip(coord_vectors, colors): + for vec, color in zip(coord_vectors, colors, strict=True): dx = int(np.dot(vec, self.orienter.unit_vectors[0])) dy = int(np.dot(vec, self.orienter.unit_vectors[1])) px = np.array([px0, px0 + dx], dtype="int64") @@ -896,7 +900,7 @@ def show(self, clip_ratio=None): >>> cam.show() """ - if "__IPYTHON__" in dir(builtins): + if IS_IPYTHON: from IPython.core.displaypub import publish_display_data image = self.snapshot()[:, :, :3] @@ -1921,7 +1925,7 @@ def _setup_box_properties(self, width, center, unit_vectors): def snapshot(self, fn=None, clip_ratio=None, double_check=False, num_threads=0): my_storage = {} offx, offy = np.meshgrid(range(self.nimx), range(self.nimy)) - offxy = zip(offx.ravel(), offy.ravel()) + offxy = zip(offx.ravel(), offy.ravel(), strict=True) for sto, xy in parallel_objects( offxy, self.procs_per_wg, storage=my_storage, dynamic=True @@ -1957,7 +1961,7 @@ def reduce_images(self, im_dict): final_image = 0 if self.comm.rank == 0: offx, offy = np.meshgrid(range(self.nimx), range(self.nimy)) 
-            offxy = zip(offx.ravel(), offy.ravel())
+            offxy = zip(offx.ravel(), offy.ravel(), strict=True)
             nx, ny = self.resolution
             final_image = np.empty(
                 (nx * self.nimx, ny * self.nimy, 4), dtype="float64", order="C"
@@ -2439,6 +2443,8 @@ def _render(self, double_check, num_threads, image, sampler, msg):
 data_object_registry["stereospherical_camera"] = StereoSphericalCamera
 
 
+# replaced in the volume_rendering API by the function of the same name in
+# yt/visualization/volume_rendering/off_axis_projection.py
 def off_axis_projection(
     ds,
     center,
diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py
index 6943d04ed7..85d122fd5f 100644
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -290,7 +290,7 @@ def transfer_function(self, value):
         if not isinstance(value, valid_types):
             raise RuntimeError(
                 "transfer_function not a valid type, "
-                "received object of type %s" % type(value)
+                f"received object of type {type(value)}"
             )
         if isinstance(value, ProjectionTransferFunction):
             self.sampler_type = "projection"
@@ -607,7 +611,7 @@ def render(self, camera, zbuffer=None):
 
     def finalize_image(self, camera, image):
         if self._volume is not None:
-            image = self.volume.reduce_tree_images(image, camera.lens.viewpoint)
+            image = self.volume.reduce_tree_images(
+                image,
+                camera.lens.viewpoint,
+                use_opacity=self.transfer_function.grey_opacity,
+            )
 
         return super().finalize_image(camera, image)
diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py
index e45033cb14..ae30153a42 100644
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -1,10 +1,10 @@
-import builtins
 import functools
 from collections import OrderedDict
 from typing import Optional, Union
 
 import numpy as np
 
+from yt._maintenance.ipython_compat import IS_IPYTHON
 from yt.config import ytcfg
 from yt.funcs import mylog
 from yt.units.dimensions import length  # type: ignore
@@ -141,8 +141,8 @@ def add_source(self, render_source, keyname=None):
             lens_str = str(self.camera.lens)
             if "fisheye" in lens_str or "spherical" in lens_str:
                 raise NotImplementedError(
-                    "Line annotation sources are not supported for %s."
-                    % (type(self.camera.lens).__name__),
+                    "Line annotation sources are not supported "
+                    f"for {type(self.camera.lens).__name__}."
                 )
 
         if isinstance(render_source, (LineSource, PointSource)):
@@ -909,7 +909,7 @@ def show(self, sigma_clip=None):
         >>> sc.show()
 
         """
-        if "__IPYTHON__" in dir(builtins):
+        if IS_IPYTHON:
             from IPython.display import display
 
             self._sigma_clip = sigma_clip
diff --git a/yt/visualization/volume_rendering/tests/test_off_axis_SPH.py b/yt/visualization/volume_rendering/tests/test_off_axis_SPH.py
index dc5f4ae13a..57f6f85d52 100644
--- a/yt/visualization/volume_rendering/tests/test_off_axis_SPH.py
+++ b/yt/visualization/volume_rendering/tests/test_off_axis_SPH.py
@@ -26,6 +26,7 @@ def test_no_rotation():
     width = right_edge - left_edge
     px = ad["all", "particle_position_x"]
     py = ad["all", "particle_position_y"]
+    pz = ad["all", "particle_position_z"]
     hsml = ad["all", "smoothing_length"]
     quantity_to_smooth = ad["gas", "density"]
     density = ad["io", "density"]
@@ -38,7 +39,7 @@
         ds, center, normal_vector, width, resolution, ("gas", "density")
     )
     pixelize_sph_kernel_projection(
-        buf2, mask, px, py, hsml, mass, density, quantity_to_smooth, bounds
+        buf2, mask, px, py, pz, hsml, mass, density, quantity_to_smooth, bounds
     )
     assert_almost_equal(buf1.ndarray_view(), buf2)
diff --git a/yt/visualization/volume_rendering/transfer_functions.py b/yt/visualization/volume_rendering/transfer_functions.py
index aec7682157..5c0a4e58e8 100644
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -1,9 +1,14 @@
+import sys
+
 import numpy as np
 from more_itertools import always_iterable
 
 from yt.funcs import mylog
 from yt.utilities.physical_constants import clight, hcgs, kboltz
 
+if sys.version_info < (3, 10):
+    from yt._maintenance.backports import zip
+
 
 class TransferFunction:
     r"""A transfer function governs the transmission of emission and
@@ -433,7 +438,7 @@ def add_gaussian(self, location, width, height):
         >>> tf = ColorTransferFunction((-10.0, -5.0))
         >>> tf.add_gaussian(-9.0, 0.01, [1.0, 0.0, 0.0, 1.0])
         """
-        for tf, v in zip(self.funcs, height):
+        for tf, v in zip(self.funcs, height, strict=True):
             tf.add_gaussian(location, width, v)
         self.features.append(
             (
@@ -474,7 +479,7 @@ def add_step(self, start, stop, value):
         >>> tf = ColorTransferFunction((-10.0, -5.0))
         >>> tf.add_step(-6.0, -5.0, [1.0, 1.0, 1.0, 1.0])
         """
-        for tf, v in zip(self.funcs, value):
+        for tf, v in zip(self.funcs, value, strict=True):
             tf.add_step(start, stop, v)
         self.features.append(
             (
@@ -890,7 +895,7 @@ def add_layers(
             alpha = np.ones(N, dtype="float64")
         elif alpha is None and not self.grey_opacity:
             alpha = np.logspace(-3, 0, N)
-        for v, a in zip(np.mgrid[mi : ma : N * 1j], alpha):
+        for v, a in zip(np.mgrid[mi : ma : N * 1j], alpha, strict=True):
             self.sample_colormap(v, w, a, colormap=colormap, col_bounds=col_bounds)
 
     def get_colormap_image(self, height, width):
diff --git a/yt/visualization/volume_rendering/utils.py b/yt/visualization/volume_rendering/utils.py
index d275f37b41..60340aca2a 100644
--- a/yt/visualization/volume_rendering/utils.py
+++ b/yt/visualization/volume_rendering/utils.py
@@ -26,7 +26,7 @@ def data_source_or_all(data_source):
         raise RuntimeError(
             "The data_source is not a valid 3D data container.\n"
             "Expected an object of type YTSelectionContainer3D but received "
-            "an object of type %s." % type(data_source)
+            f"an object of type {type(data_source)}."
        )
    return data_source
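
A note on the velocity-dispersion guard in the first hunk above: sqrt(<v_los**2> - <v_los>**2) can receive tiny negative arguments from floating-point cancellation, which the new test zeroes out instead of letting sqrt produce NaN. A minimal self-contained sketch of the same guard (the array values are illustrative, not taken from the test):

import numpy as np

# illustrative stand-ins for the two frb images; the middle pixel has a
# tiny negative <v**2> - <v>**2 caused by floating-point cancellation
expsq = np.array([4.0, 1.0, 0.25])          # <v**2>
sqexp = np.array([3.0, 1.0 + 1e-15, 0.25])  # <v>**2

diff = expsq - sqexp
# threshold far below the smallest genuinely nonzero entry of either image
mindiff = 1e-10 * min(expsq[expsq > 0].min(), sqexp[sqexp > 0].min())
# pixels where the difference is negative, but negligibly so
negligible = (diff < 0) & (diff > -mindiff)

result = np.zeros_like(diff)
keep = ~negligible
result[keep] = np.sqrt(diff[keep])
print(result)  # [1. 0. 0.]

Genuinely negative differences (beyond round-off) still produce NaN here, so a real disagreement between the two fields would still fail the assertion.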
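The new off-axis test builds its image basis by Gram-Schmidt: subtract the line-of-sight component of the north vector, renormalize, and take a cross product for the third axis (multiplying by e2dir instead of e1dir would only rescale the vector and orthogonalize nothing). The same construction in isolation, with the test's illustrative vectors:

import numpy as np

epsilon = 1e-4
projaxis = np.array([epsilon, 0.0, np.sqrt(1.0 - epsilon**2)])
e1dir = projaxis / np.linalg.norm(projaxis)        # line of sight (unit)

northvector = np.array([1.0e-4, 1.0, 0.0])
# subtract the component along e1dir, then renormalize
e2dir = northvector - np.dot(e1dir, northvector) * e1dir
e2dir /= np.linalg.norm(e2dir)                     # image-plane "up"
e3dir = np.cross(e1dir, e2dir)                     # right-handed third axis

# all pairwise dot products vanish up to round-off
for a, b in [(e1dir, e2dir), (e1dir, e3dir), (e2dir, e3dir)]:
    assert abs(np.dot(a, b)) < 1e-12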
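off_axis_projection now accepts depth as a bare float, a (value, unit) tuple, or a unyt_array, and normalizes all three to raw floats before handing them to the SPH pixelizer. A sketch of that coercion using plain unyt units ("cm" stands in for "code_length" here; the real code converts through the dataset's unit registry via ds.arr):

import numpy as np
import unyt

def normalize_depth(depth):
    # tuple (value, unit) -> unyt array with explicit units
    if isinstance(depth, tuple):
        depth = unyt.unyt_array(np.array([depth[0]]), depth[1])
    # anything carrying units -> raw floats in the target unit
    if hasattr(depth, "units"):
        depth = depth.to("cm").d
    # bare floats fall through unchanged (treated as code units)
    return depth

print(normalize_depth((0.5, "cm")))                   # [0.5]
print(normalize_depth(unyt.unyt_array([5.0], "mm")))  # [0.5]
print(normalize_depth(0.5))                           # 0.5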
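The weighted-projection normalization now divides only where the accumulated weight is nonzero, rather than dividing everywhere and repairing inf/nan afterwards. A small standalone sketch of the same masking pattern (values are illustrative):

import numpy as np

# image[..., 0] holds weighted sums, image[..., 1] the accumulated weights
image = np.array(
    [
        [[6.0, 2.0], [0.0, 0.0]],
        [[3.0, 3.0], [5.0, 0.0]],
    ]
)
mask = image[:, :, 1] == 0
nmask = np.logical_not(mask)
# divide only where the weight is nonzero ...
image[:, :, 0][nmask] /= image[:, :, 1][nmask]
# ... and zero out empty pixels instead of leaving inf/nan behind
image[mask] = 0
print(image[:, :, 0])  # [[3. 0.], [1. 0.]]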
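Several files now do "from yt._maintenance.backports import zip" on Python < 3.10 so that strict=True can be used uniformly across supported versions. A sketch of what such a shim can look like (the actual backport shipped in yt may be implemented differently):

import sys

if sys.version_info < (3, 10):
    _zip = zip  # keep a handle on the builtin

    def zip(*iterables, strict=False):  # intentional shadowing, as in the import above
        if not strict:
            yield from _zip(*iterables)
            return
        sentinel = object()
        iterators = [iter(it) for it in iterables]
        while iterators:
            items = [next(it, sentinel) for it in iterators]
            if all(item is sentinel for item in items):
                return  # every iterable exhausted at the same time
            if any(item is sentinel for item in items):
                raise ValueError("zip() arguments have different lengths")
            yield tuple(items)

# raises ValueError instead of silently truncating:
# list(zip([1, 2], [1, 2, 3], strict=True))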
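Likewise, the repeated '__IPYTHON__' in dir(builtins) checks are centralized as IS_IPYTHON in yt._maintenance.ipython_compat. Judging from the replaced call sites, the constant presumably amounts to something like the following (the actual module may differ):

import builtins

# computed once at import time instead of at every show() call
IS_IPYTHON = "__IPYTHON__" in dir(builtins)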