diff --git a/.github/actions/setup_env/action.yml b/.github/actions/setup_env/action.yml index 8c64f24dff4..a0e4abb5e35 100644 --- a/.github/actions/setup_env/action.yml +++ b/.github/actions/setup_env/action.yml @@ -18,7 +18,7 @@ runs: - name: Generate Cache Key run: | file_hash=$(cat conda-${{ inputs.os-label }}.lock | shasum -a 256 | cut -d' ' -f1) - echo "file_hash=$file_hash" >> "${GITHUB_OUTPUT}" + echo "file_hash=tardis-conda-env-${{ inputs.os-label }}-${file_hash}-v1" >> "${GITHUB_OUTPUT}" id: cache-environment-key shell: bash diff --git a/.github/actions/setup_lfs/action.yml b/.github/actions/setup_lfs/action.yml index 86a3b0464d4..56d644795d3 100644 --- a/.github/actions/setup_lfs/action.yml +++ b/.github/actions/setup_lfs/action.yml @@ -1,12 +1,16 @@ name: "Setup LFS" -description: "Pull LFS repositories and caches them" +description: "Sets up Git LFS, retrieves LFS cache and fails if cache is not available" inputs: regression-data-repo: - description: "tardis regression data repository" + description: "Repository containing regression data (format: owner/repo)" required: false default: "tardis-sn/tardis-regression-data" + atom-data-sparse: + description: "If true, only downloads atom_data/kurucz_cd23_chianti_H_He.h5 instead of full regression data" + required: false + default: 'false' runs: using: "composite" @@ -16,37 +20,37 @@ runs: with: repository: ${{ inputs.regression-data-repo }} path: tardis-regression-data + sparse-checkout: ${{ inputs.atom-data-sparse == 'true' && 'atom_data/kurucz_cd23_chianti_H_He.h5' || '' }} + lfs: false - name: Create LFS file list - run: git lfs ls-files -l | cut -d' ' -f1 | sort > .lfs-assets-id + run: | + if [ "${{ inputs.atom-data-sparse }}" == "true" ]; then + echo "Using atom data sparse checkout" + echo "atom_data/kurucz_cd23_chianti_H_He.h5" > .lfs-files-list + else + echo "Using full repository checkout" + git lfs ls-files -l | cut -d' ' -f1 | sort > .lfs-files-list + fi working-directory: tardis-regression-data 
shell: bash - + - name: Restore LFS cache uses: actions/cache/restore@v4 id: lfs-cache-regression-data with: path: tardis-regression-data/.git/lfs - key: ${{ runner.os }}-lfs-${{ hashFiles('tardis-regression-data/.lfs-assets-id') }}-v1 + key: tardis-regression-${{ inputs.atom-data-sparse == 'true' && 'atom-data-sparse' || 'full-data' }}-${{ hashFiles('tardis-regression-data/.lfs-files-list') }}-${{ inputs.regression-data-repo }}-v1 + fail-on-cache-miss: true - - name: Git LFS Pull - run: git lfs pull + - name: Git LFS Checkout (Full) + if: inputs.atom-data-sparse != 'true' && steps.lfs-cache-regression-data.outputs.cache-hit == 'true' + run: git lfs checkout working-directory: tardis-regression-data - if: steps.lfs-cache-regression-data.outputs.cache-hit != 'true' shell: bash - - name: Git LFS Checkout - run: git lfs checkout + - name: Git LFS Checkout (Sparse) + if: inputs.atom-data-sparse == 'true' && steps.lfs-cache-regression-data.outputs.cache-hit == 'true' + run: git lfs checkout atom_data/kurucz_cd23_chianti_H_He.h5 working-directory: tardis-regression-data - if: steps.lfs-cache-regression-data.outputs.cache-hit == 'true' shell: bash - - - name: Save LFS cache if not found - # uses fake ternary - # for reference: https://github.com/orgs/community/discussions/26738#discussioncomment-3253176 - if: ${{ steps.lfs-cache-regression-data.outputs.cache-hit != 'true' && !contains(github.ref, 'merge') && always() || false }} - uses: actions/cache/save@v4 - id: lfs-cache-regression-data-save - with: - path: tardis-regression-data/.git/lfs - key: ${{ runner.os }}-lfs-${{ hashFiles('tardis-regression-data/.lfs-assets-id') }}-v1 diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index e1e84afb6ca..db9f730debb 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -29,6 +29,12 @@ defaults: shell: bash -l {0} jobs: + test-cache: + uses: ./.github/workflows/lfs-cache.yml + with: + atom-data-sparse: false + 
regression-data-repo: tardis-sn/tardis-regression-data + build: if: github.repository_owner == 'tardis-sn' && (github.event_name == 'push' || @@ -37,6 +43,7 @@ jobs: (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'benchmarks'))) runs-on: ubuntu-latest + needs: [test-cache] steps: - uses: actions/checkout@v4 if: github.event_name != 'pull_request_target' @@ -54,13 +61,10 @@ jobs: run: git fetch origin master:master if: github.event_name == 'pull_request_target' - - uses: actions/checkout@v4 + - name: Setup LFS + uses: ./.github/actions/setup_lfs with: - repository: tardis-sn/tardis-regression-data - path: tardis-regression-data - lfs: true - sparse-checkout: | - atom_data/kurucz_cd23_chianti_H_He.h5 + atom-data-sparse: true - name: Setup Mamba uses: mamba-org/setup-micromamba@v1 diff --git a/.github/workflows/build-docs.yml b/.github/workflows/build-docs.yml index 22a9d1369c6..b9a928d4aef 100644 --- a/.github/workflows/build-docs.yml +++ b/.github/workflows/build-docs.yml @@ -36,6 +36,12 @@ defaults: shell: bash -l {0} jobs: + test-cache: + uses: ./.github/workflows/lfs-cache.yml + with: + atom-data-sparse: true + regression-data-repo: tardis-sn/tardis-regression-data + check-for-changes: runs-on: ubuntu-latest if: ${{ !github.event.pull_request.draft }} @@ -77,7 +83,7 @@ jobs: build-docs: runs-on: ubuntu-latest - needs: check-for-changes + needs: [test-cache, check-for-changes] if: needs.check-for-changes.outputs.trigger-check-outcome == 'success' || needs.check-for-changes.outputs.docs-check-outcome == 'success' steps: - uses: actions/checkout@v4 @@ -90,13 +96,10 @@ jobs: ref: ${{ github.event.pull_request.head.sha }} if: github.event_name == 'pull_request_target' - - uses: actions/checkout@v4 + - name: Setup LFS + uses: ./.github/actions/setup_lfs with: - repository: tardis-sn/tardis-regression-data - path: tardis-regression-data - lfs: true - sparse-checkout: | - atom_data/kurucz_cd23_chianti_H_He.h5 + 
atom-data-sparse: true - name: Setup environment uses: ./.github/actions/setup_env diff --git a/.github/workflows/lfs-cache.yml b/.github/workflows/lfs-cache.yml new file mode 100644 index 00000000000..9f992f55516 --- /dev/null +++ b/.github/workflows/lfs-cache.yml @@ -0,0 +1,84 @@ +name: Save LFS Cache + +on: + workflow_call: + inputs: + atom-data-sparse: + description: "If true, only downloads atom_data/kurucz_cd23_chianti_H_He.h5" + required: false + default: false + type: boolean + regression-data-repo: + description: "Repository containing regression data (format: owner/repo)" + required: false + default: "tardis-sn/tardis-regression-data" + type: string + +defaults: + run: + shell: bash -l {0} + +concurrency: + # Only one workflow can run at a time + # the workflow group is a unique identifier and contains the workflow name, pull request number, atom data sparse, and regression data repo + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}-${{ inputs.atom-data-sparse == true && 'atom-data-sparse' || 'full-data' }}-${{ inputs.regression-data-repo }} + cancel-in-progress: true + + +jobs: + lfs-cache: + if: github.repository_owner == 'tardis-sn' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + repository: ${{ inputs.regression-data-repo }} + path: tardis-regression-data + sparse-checkout: ${{ inputs.atom-data-sparse == true && 'atom_data/kurucz_cd23_chianti_H_He.h5' || '' }} + + - name: Create LFS file list + run: | + if [ "${{ inputs.atom-data-sparse }}" == "true" ]; then + echo "Using atom data sparse checkout" + echo "atom_data/kurucz_cd23_chianti_H_He.h5" > .lfs-files-list + else + echo "Using full repository checkout" + git lfs ls-files -l | cut -d' ' -f1 | sort > .lfs-files-list + fi + working-directory: tardis-regression-data + + + - name: Test cache availability + uses: actions/cache/restore@v4 + id: test-lfs-cache-regression-data + with: + path: tardis-regression-data/.git/lfs + key: 
tardis-regression-${{ inputs.atom-data-sparse == true && 'atom-data-sparse' || 'full-data' }}-${{ hashFiles('tardis-regression-data/.lfs-files-list') }}-${{ inputs.regression-data-repo }}-v1 + lookup-only: true + + - name: Git LFS Pull Atom Data + run: git lfs pull --include=atom_data/kurucz_cd23_chianti_H_He.h5 + if: ${{ inputs.atom-data-sparse == true && steps.test-lfs-cache-regression-data.outputs.cache-hit != 'true' }} + working-directory: tardis-regression-data + + - name: Git LFS Pull Full Data + run: git lfs pull + if: ${{ inputs.atom-data-sparse == false && steps.test-lfs-cache-regression-data.outputs.cache-hit != 'true' }} + working-directory: tardis-regression-data + + - name: Git LFS Checkout + if: ${{ inputs.atom-data-sparse == true }} + run: git lfs checkout atom_data/kurucz_cd23_chianti_H_He.h5 + working-directory: tardis-regression-data + + - name: Git LFS Checkout Full + if: ${{ inputs.atom-data-sparse == false }} + run: git lfs checkout + working-directory: tardis-regression-data + + - name: Save LFS cache if not found + uses: actions/cache/save@v4 + if: ${{ steps.test-lfs-cache-regression-data.outputs.cache-hit != 'true' && !contains(github.ref, 'merge') }} + with: + path: tardis-regression-data/.git/lfs + key: tardis-regression-${{ inputs.atom-data-sparse == true && 'atom-data-sparse' || 'full-data' }}-${{ hashFiles('tardis-regression-data/.lfs-files-list') }}-${{ inputs.regression-data-repo }}-v1 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 7513c56e9d5..b0b4353a78f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -38,9 +38,16 @@ concurrency: cancel-in-progress: true jobs: + test-cache: + uses: ./.github/workflows/lfs-cache.yml + with: + atom-data-sparse: false + regression-data-repo: tardis-sn/tardis-regression-data + tests: name: ${{ matrix.continuum }} continuum ${{ matrix.os }} ${{ inputs.pip_git && 'pip tests enabled' || '' }} if: github.repository_owner == 'tardis-sn' + 
needs: [test-cache] runs-on: ${{ matrix.os }} strategy: fail-fast: false diff --git a/docs/contributing/development/continuous_integration.rst b/docs/contributing/development/continuous_integration.rst index 45db365ccf1..82aec6f8964 100644 --- a/docs/contributing/development/continuous_integration.rst +++ b/docs/contributing/development/continuous_integration.rst @@ -27,6 +27,30 @@ TARDIS Pipelines Brief description of pipelines already implemented on TARDIS +Cache Keys in TARDIS CI +----------------------- + +TARDIS uses specific cache key formats to efficiently store and retrieve data during CI runs: + +1. **Regression Data Cache Keys** + - Format: ``tardis-regression-<data-type>-<hash>-v1`` + - Examples: + - ``tardis-regression-atom-data-sparse-<hash>-v1`` - For atomic data cache + - ``tardis-regression-full-data-<hash>-v1`` - For full TARDIS regression data cache + - Used in: ``setup_lfs`` action + +2. **Environment Cache Keys** + - Format: ``tardis-conda-env-<os-label>-<hash>-v1`` + - Examples: + - ``tardis-conda-env-linux-<hash>-v1`` - For Linux conda environment + - ``tardis-conda-env-macos-<hash>-v1`` - For macOS conda environment + - Used in: ``setup_env`` action + +.. warning:: + - The version suffix (-v1) allows for future cache invalidation if needed. + - Sometimes the cache might not be saved due to race conditions between parallel jobs. Please check workflow runs when testing new regression data for cache misses to avoid consuming LFS quota. + + Streamlined Steps for TARDIS Pipelines ======================================== diff --git a/docs/contributing/development/running_tests.rst b/docs/contributing/development/running_tests.rst index a11ac2eca7e..c1150e51340 100644 --- a/docs/contributing/development/running_tests.rst +++ b/docs/contributing/development/running_tests.rst @@ -62,7 +62,7 @@ Or, to run tests for a particular file or directory To prevent leaking LFS quota, tests have been disabled on forks. 
If, by any chance, you need to run tests on your fork, make sure to run the tests workflow on master branch first. The LFS cache generated in the master branch should be available in all child branches. - You can check if cache was generated by looking in the ``Restore LFS Cache`` step of the workflow run. + You can check if cache was generated by looking in the ``Setup LFS`` step of the workflow run. Cache can also be found under the "Management" Section under "Actions" tab. Generating Plasma Reference diff --git a/docs/io/grid/how_to_TardisGridTutorial.ipynb b/docs/io/grid/how_to_TardisGridTutorial.ipynb index b4655640cd4..e0e91d24e6b 100644 --- a/docs/io/grid/how_to_TardisGridTutorial.ipynb +++ b/docs/io/grid/how_to_TardisGridTutorial.ipynb @@ -60,7 +60,7 @@ "outputs": [], "source": [ "# Create a tardis grid directly from a dataframe.\n", - "grid.tardisGrid(configFile='example.yml', gridFrame=df)" + "grid.TardisGrid(configFile='example.yml', gridFrame=df)" ] }, { @@ -82,7 +82,7 @@ "outputs": [], "source": [ "#Create a tardis grid from an axes dict using the classmethod.\n", - "grid.tardisGrid.from_axes(configFile='example.yml', axesdict=axesdict)" + "grid.TardisGrid.from_axes(configFile='example.yml', axesdict=axesdict)" ] }, { @@ -100,7 +100,7 @@ "metadata": {}, "outputs": [], "source": [ - "tg = grid.tardisGrid(configFile='example.yml', gridFrame=df)" + "tg = grid.TardisGrid(configFile='example.yml', gridFrame=df)" ] }, { diff --git a/tardis/analysis/opacities.py b/tardis/analysis/opacities.py index 4b8428c55b8..dcf12cddb08 100644 --- a/tardis/analysis/opacities.py +++ b/tardis/analysis/opacities.py @@ -12,7 +12,7 @@ logger = logging.getLogger(__name__) -class opacity_calculator: +class OpacityCalculator: """Basic Tardis opacity and optical depth calculator Given the model object of a Tardis run and a frequency grid, detailed diff --git a/tardis/grid/base.py b/tardis/grid/base.py index b468a5b6828..5ea653a761f 100644 --- a/tardis/grid/base.py +++ 
b/tardis/grid/base.py @@ -32,7 +32,7 @@ def _set_tardis_config_property(tardis_config, key, value): setattr(tmp_dict, keyitems[-1], value) -class tardisGrid: +class TardisGrid: """ A class that stores a grid of TARDIS parameters and facilitates running large numbers of simulations diff --git a/tardis/grid/tests/test_grid.py b/tardis/grid/tests/test_grid.py index e6f84281acc..392b7a83ff0 100644 --- a/tardis/grid/tests/test_grid.py +++ b/tardis/grid/tests/test_grid.py @@ -20,8 +20,8 @@ def test_grid(atomic_dataset): } df = pd.read_csv(dfpath) - g = grid.tardisGrid(configFile=ymlpath, gridFrame=df) - g2 = grid.tardisGrid.from_axes(configFile=ymlpath, axesdict=axesdict) + g = grid.TardisGrid(configFile=ymlpath, gridFrame=df) + g2 = grid.TardisGrid.from_axes(configFile=ymlpath, axesdict=axesdict) # Check that grid attribute has the right shape assert g.grid.shape == df.shape diff --git a/tardis/io/model/readers/tests/test_arepo_parser.py b/tardis/io/model/readers/tests/test_arepo_parser.py index fd88eaaec8f..c874e5af6c2 100644 --- a/tardis/io/model/readers/tests/test_arepo_parser.py +++ b/tardis/io/model/readers/tests/test_arepo_parser.py @@ -8,7 +8,7 @@ from tardis.io.model.readers import arepo -@pytest.fixture() +@pytest.fixture def arepo_snapshot_fname(tardis_regression_path): return Path(tardis_regression_path) / "arepo_data" / "arepo_snapshot.json" diff --git a/tardis/plasma/tests/test_tardis_model_density_config.py b/tardis/plasma/tests/test_tardis_model_density_config.py index 5551e7c4eb4..81212b874d6 100644 --- a/tardis/plasma/tests/test_tardis_model_density_config.py +++ b/tardis/plasma/tests/test_tardis_model_density_config.py @@ -14,14 +14,14 @@ def tardis_model_density_config(example_model_file_dir): ) -@pytest.fixture() +@pytest.fixture def raw_simulation_state(tardis_model_density_config, kurucz_atomic_data): return SimulationState.from_config( tardis_model_density_config, atom_data=kurucz_atomic_data ) -@pytest.fixture() +@pytest.fixture def raw_plasma( 
tardis_model_density_config, raw_simulation_state, kurucz_atomic_data ): diff --git a/tardis/transport/montecarlo/tests/conftest.py b/tardis/transport/montecarlo/tests/conftest.py index 61f80e4aac0..9a28726265c 100644 --- a/tardis/transport/montecarlo/tests/conftest.py +++ b/tardis/transport/montecarlo/tests/conftest.py @@ -147,7 +147,7 @@ def static_packet(): ) -@pytest.fixture() +@pytest.fixture def set_seed_fixture(): def set_seed(value): np.random.seed(value) @@ -155,7 +155,7 @@ def set_seed(value): return njit(set_seed) -@pytest.fixture() +@pytest.fixture def random_call_fixture(): def random_call(): np.random.random() diff --git a/tardis/transport/montecarlo/tests/test_rpacket_last_interaction_tracker.py b/tardis/transport/montecarlo/tests/test_rpacket_last_interaction_tracker.py index b2c449cadab..54c84df5579 100644 --- a/tardis/transport/montecarlo/tests/test_rpacket_last_interaction_tracker.py +++ b/tardis/transport/montecarlo/tests/test_rpacket_last_interaction_tracker.py @@ -18,7 +18,7 @@ def interaction_type_in_use( return interaction_type -@pytest.fixture() +@pytest.fixture def shell_id_in_use( nb_simulation_verysimple, interaction_type_in_use, @@ -32,7 +32,7 @@ def shell_id_in_use( return shell_id[mask] -@pytest.fixture() +@pytest.fixture def r_in_use( nb_simulation_verysimple, interaction_type_in_use, @@ -65,7 +65,7 @@ def interaction_type_to_check( return interaction_type -@pytest.fixture() +@pytest.fixture def shell_id_to_check( nb_simulation_verysimple, interaction_type_to_check, @@ -83,7 +83,7 @@ def shell_id_to_check( return shell_id[mask] -@pytest.fixture() +@pytest.fixture def r_to_check( nb_simulation_verysimple, interaction_type_to_check, @@ -101,7 +101,7 @@ def r_to_check( return r[mask] -@pytest.fixture() +@pytest.fixture def nu_packet_collection( nb_simulation_verysimple, ): @@ -112,7 +112,7 @@ def nu_packet_collection( return packet_collection.output_nus -@pytest.fixture() +@pytest.fixture def nu_to_check( nb_simulation_verysimple, ): 
diff --git a/tardis/transport/montecarlo/tests/test_rpacket_tracker.py b/tardis/transport/montecarlo/tests/test_rpacket_tracker.py index 971183d2360..fd9dec9fae7 100644 --- a/tardis/transport/montecarlo/tests/test_rpacket_tracker.py +++ b/tardis/transport/montecarlo/tests/test_rpacket_tracker.py @@ -9,7 +9,7 @@ from tardis.transport.montecarlo.r_packet import InteractionType -@pytest.fixture() +@pytest.fixture def interaction_type_last_interaction_class( simulation_rpacket_tracking, ): @@ -20,7 +20,7 @@ def interaction_type_last_interaction_class( return interaction_type -@pytest.fixture() +@pytest.fixture def shell_id_last_interaction_class( simulation_rpacket_tracking, ): @@ -39,7 +39,7 @@ def shell_id_last_interaction_class( return last_line_interaction_shell_id -@pytest.fixture() +@pytest.fixture def nu_from_packet_collection( simulation_rpacket_tracking, ): @@ -73,7 +73,7 @@ def last_interaction_type_rpacket_tracker(rpacket_tracker): return interaction_type -@pytest.fixture() +@pytest.fixture def shell_id_rpacket_tracker( rpacket_tracker, last_interaction_type_rpacket_tracker ): @@ -92,7 +92,7 @@ def shell_id_rpacket_tracker( return last_line_interaction_shell_id -@pytest.fixture() +@pytest.fixture def nu_rpacket_tracker(rpacket_tracker): """Output nu of rpacket from RPacketTracker class""" no_of_packets = len(rpacket_tracker) diff --git a/tardis/visualization/tools/tests/test_convergence_plot.py b/tardis/visualization/tools/tests/test_convergence_plot.py index bb69e23e68b..c511c6ce8c2 100644 --- a/tardis/visualization/tools/tests/test_convergence_plot.py +++ b/tardis/visualization/tools/tests/test_convergence_plot.py @@ -22,7 +22,7 @@ def convergence_plots(request): return convergence_plots -@pytest.fixture() +@pytest.fixture def fetch_luminosity_data(convergence_plots): """Prepare data for t_inner and luminosity plot.""" for item in [2] * convergence_plots.iterations: diff --git a/tardis/workflows/simple_tardis_workflow.py 
b/tardis/workflows/simple_tardis_workflow.py index 9abb21be7fe..2fd45bef589 100644 --- a/tardis/workflows/simple_tardis_workflow.py +++ b/tardis/workflows/simple_tardis_workflow.py @@ -445,7 +445,7 @@ def initialize_spectrum_solver( def run(self): """Run the TARDIS simulation until convergence is reached""" - converged = False + self.converged = False while self.completed_iterations < self.total_iterations - 1: logger.info( f"\n\tStarting iteration {(self.completed_iterations + 1):d} of {self.total_iterations:d}" @@ -466,13 +466,13 @@ def run(self): self.solve_plasma(estimated_radfield_properties) - converged = self.check_convergence(estimated_values) + self.converged = self.check_convergence(estimated_values) self.completed_iterations += 1 - if converged and self.convergence_strategy.stop_if_converged: + if self.converged and self.convergence_strategy.stop_if_converged: break - if converged: + if self.converged: logger.info("\n\tStarting final iteration") else: logger.error( diff --git a/tardis/workflows/standard_tardis_workflow.py b/tardis/workflows/standard_tardis_workflow.py index e4ed2433799..53b0ad7c753 100644 --- a/tardis/workflows/standard_tardis_workflow.py +++ b/tardis/workflows/standard_tardis_workflow.py @@ -206,7 +206,7 @@ def update_convergence_plot_data(self, plot_data_dict): def run(self): """Run the TARDIS simulation until convergence is reached""" - converged = False + self.converged = False while self.completed_iterations < self.total_iterations - 1: logger.info( f"\n\tStarting iteration {(self.completed_iterations + 1):d} of {self.total_iterations:d}" @@ -237,13 +237,13 @@ def run(self): self.solve_plasma(estimated_radfield_properties) - converged = self.check_convergence(estimated_values) + self.converged = self.check_convergence(estimated_values) self.completed_iterations += 1 - if converged and self.convergence_strategy.stop_if_converged: + if self.converged and self.convergence_strategy.stop_if_converged: break - if converged: + if 
self.converged: logger.info("\n\tStarting final iteration") else: logger.error( diff --git a/tardis/workflows/v_inner_solver.py b/tardis/workflows/v_inner_solver.py index 20edd13c8c9..fcfdfb8ad49 100644 --- a/tardis/workflows/v_inner_solver.py +++ b/tardis/workflows/v_inner_solver.py @@ -309,7 +309,7 @@ def solve_plasma( def run(self): """Run the TARDIS simulation until convergence is reached""" - converged = False + self.converged = False while self.completed_iterations < self.total_iterations - 1: logger.info( f"\n\tStarting iteration {(self.completed_iterations + 1):d} of {self.total_iterations:d}" @@ -334,14 +334,14 @@ def run(self): estimated_values["mask"], ) - converged = self.check_convergence(estimated_values) + self.converged = self.check_convergence(estimated_values) self.completed_iterations += 1 - if converged and self.convergence_strategy.stop_if_converged: + if self.converged and self.convergence_strategy.stop_if_converged: break - if converged: + if self.converged: logger.info("\n\tStarting final iteration") else: logger.error(