Eliminate deprecation warnings from scipy and pandas (#2951)
paulromano authored Jun 12, 2024
1 parent 1f32804 commit dcb8033
Showing 3 changed files with 23 additions and 14 deletions.
12 changes: 7 additions & 5 deletions openmc/data/photon.py
@@ -124,7 +124,7 @@ class AtomicRelaxation(EqualityMixin):
Dictionary indicating the number of electrons in a subshell when neutral
(values) for given subshells (keys). The subshells should be given as
strings, e.g., 'K', 'L1', 'L2', etc.
transitions : pandas.DataFrame
transitions : dict of str to pandas.DataFrame
Dictionary indicating allowed transitions and their probabilities
(values) for given subshells (keys). The subshells should be given as
strings, e.g., 'K', 'L1', 'L2', etc. The transitions are represented as
@@ -363,8 +363,9 @@ def from_hdf5(cls, group):
df = pd.DataFrame(sub_group['transitions'][()],
columns=columns)
# Replace float indexes back to subshell strings
df[columns[:2]] = df[columns[:2]].replace(
np.arange(float(len(_SUBSHELLS))), _SUBSHELLS)
with pd.option_context('future.no_silent_downcasting', True):
df[columns[:2]] = df[columns[:2]].replace(
np.arange(float(len(_SUBSHELLS))), _SUBSHELLS)
transitions[shell] = df

return cls(binding_energy, num_electrons, transitions)
@@ -387,8 +388,9 @@ def to_hdf5(self, group, shell):

# Write transition data with replacements
if shell in self.transitions:
df = self.transitions[shell].replace(
_SUBSHELLS, range(len(_SUBSHELLS)))
with pd.option_context('future.no_silent_downcasting', True):
df = self.transitions[shell].replace(
_SUBSHELLS, range(len(_SUBSHELLS)))
group.create_dataset('transitions', data=df.values.astype(float))


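The two photon.py hunks above wrap DataFrame.replace() in pd.option_context('future.no_silent_downcasting', True) so that recent pandas (2.2+) stops emitting its FutureWarning about silent downcasting. A minimal standalone sketch of the same pattern follows; the SUBSHELLS list and column names are invented stand-ins for the module's _SUBSHELLS data, not the real values.

import numpy as np
import pandas as pd

# Invented stand-ins for openmc's _SUBSHELLS and the transition columns.
SUBSHELLS = ['K', 'L1', 'L2']
df = pd.DataFrame({'primary': [0.0, 1.0, 2.0], 'secondary': [1.0, 2.0, 0.0]})

# The 'future.no_silent_downcasting' option exists in pandas 2.2+; enabling it
# around replace() opts in to the future dtype behavior and silences the
# FutureWarning, which is the approach taken in this commit.
with pd.option_context('future.no_silent_downcasting', True):
    df[['primary', 'secondary']] = df[['primary', 'secondary']].replace(
        np.arange(float(len(SUBSHELLS))), SUBSHELLS)

print(df)  # float subshell indexes replaced by their string labels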
18 changes: 12 additions & 6 deletions openmc/mgxs_library.py
@@ -3,7 +3,7 @@

import h5py
import numpy as np
from scipy.integrate import simps
import scipy.integrate
from scipy.interpolate import interp1d
from scipy.special import eval_legendre

@@ -1823,6 +1823,12 @@ def convert_scatter_format(self, target_format, target_order=None):
# Reset and re-generate XSdata.xs_shapes with the new scattering format
xsdata._xs_shapes = None

# scipy 1.11+ prefers 'simpson', whereas older versions use 'simps'
if hasattr(scipy.integrate, 'simpson'):
integrate = scipy.integrate.simpson
else:
integrate = scipy.integrate.simps

for i, temp in enumerate(xsdata.temperatures):
orig_data = self._scatter_matrix[i]
new_shape = orig_data.shape[:-1] + (xsdata.num_orders,)
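As a side note, the simpson/simps fallback added here can be tried in isolation; the quadratic integrand below is only an illustration and is not taken from the library.

import numpy as np
import scipy.integrate

# Newer scipy exposes Simpson's rule as 'simpson' (and may no longer ship the
# old 'simps' alias); very old scipy only has 'simps'. Pick whichever exists.
if hasattr(scipy.integrate, 'simpson'):
    integrate = scipy.integrate.simpson
else:
    integrate = scipy.integrate.simps

x = np.linspace(-1.0, 1.0, 101)
print(integrate(x**2, x=x))  # ~0.6667, the integral of x^2 over [-1, 1]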
@@ -1860,7 +1866,7 @@ def convert_scatter_format(self, target_format, target_order=None):
table_fine[..., imu] += ((l + 0.5)
* eval_legendre(l, mu_fine[imu]) *
orig_data[..., l])
new_data[..., h_bin] = simps(table_fine, mu_fine)
new_data[..., h_bin] = integrate(table_fine, mu_fine)

elif self.scatter_format == SCATTER_TABULAR:
# Calculate the mu points of the current data
@@ -1874,7 +1880,7 @@
for l in range(xsdata.num_orders):
y = (interp1d(mu_self, orig_data)(mu_fine) *
eval_legendre(l, mu_fine))
new_data[..., l] = simps(y, mu_fine)
new_data[..., l] = integrate(y, mu_fine)

elif target_format == SCATTER_TABULAR:
# Simply use an interpolating function to get the new data
@@ -1893,7 +1899,7 @@
interp = interp1d(mu_self, orig_data)
for h_bin in range(xsdata.num_orders):
mu_fine = np.linspace(mu[h_bin], mu[h_bin + 1], _NMU)
new_data[..., h_bin] = simps(interp(mu_fine), mu_fine)
new_data[..., h_bin] = integrate(interp(mu_fine), mu_fine)

elif self.scatter_format == SCATTER_HISTOGRAM:
# The histogram format does not have enough information to
@@ -1919,7 +1925,7 @@
mu_fine = np.linspace(-1, 1, _NMU)
for l in range(xsdata.num_orders):
y = interp(mu_fine) * norm * eval_legendre(l, mu_fine)
new_data[..., l] = simps(y, mu_fine)
new_data[..., l] = integrate(y, mu_fine)

elif target_format == SCATTER_TABULAR:
# Simply use an interpolating function to get the new data
@@ -1938,7 +1944,7 @@
for h_bin in range(xsdata.num_orders):
mu_fine = np.linspace(mu[h_bin], mu[h_bin + 1], _NMU)
new_data[..., h_bin] = \
norm * simps(interp(mu_fine), mu_fine)
norm * integrate(interp(mu_fine), mu_fine)

# Remove small values resulting from numerical precision issues
new_data[..., np.abs(new_data) < 1.E-10] = 0.
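Every call site changed from simps to the integrate alias follows the same recipe: rebuild the angular distribution on a fine mu grid and integrate it over a bin with Simpson's rule. A self-contained sketch of the Legendre-to-histogram case is below, using made-up moments and a single bin; it assumes a scipy new enough to provide 'simpson'.

import numpy as np
from scipy.integrate import simpson  # assumes scipy provides 'simpson'
from scipy.special import eval_legendre

a_l = np.array([1.0, 0.3, 0.1])       # made-up Legendre moments
mu_lo, mu_hi, n_mu = -1.0, -0.5, 101  # one angular bin and a fine grid

mu_fine = np.linspace(mu_lo, mu_hi, n_mu)
f = np.zeros_like(mu_fine)
for l, a in enumerate(a_l):
    # f(mu) = sum_l (l + 1/2) * P_l(mu) * a_l, as in the table_fine loop above
    f += (l + 0.5) * eval_legendre(l, mu_fine) * a

print(simpson(f, x=mu_fine))          # value assigned to this histogram bin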
7 changes: 4 additions & 3 deletions tests/unit_tests/test_data_photon.py
@@ -1,5 +1,3 @@
#!/usr/bin/env python

from collections.abc import Mapping, Callable
import os
from pathlib import Path
@@ -123,6 +121,8 @@ def test_reactions(element, reaction):
reactions[18]


# TODO: Remove skip when support is Python 3.9+
@pytest.mark.skipif(not hasattr(pd.options, 'future'), reason='pandas version too old')
@pytest.mark.parametrize('element', ['Pu'], indirect=True)
def test_export_to_hdf5(tmpdir, element):
filename = str(tmpdir.join('tmp.h5'))
@@ -146,8 +146,9 @@ def test_export_to_hdf5(tmpdir, element):
# Export to hdf5 again
element2.export_to_hdf5(filename, 'w')


def test_photodat_only(run_in_tmpdir):
endf_dir = Path(os.environ['OPENMC_ENDF_DATA'])
photoatomic_file = endf_dir / 'photoat' / 'photoat-001_H_000.endf'
data = openmc.data.IncidentPhoton.from_endf(photoatomic_file)
data.export_to_hdf5('tmp.h5', 'w')
data.export_to_hdf5('tmp.h5', 'w')
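The new skipif marker keeps the export test from running on pandas versions that predate the 'future' option namespace. The same feature check can be done outside pytest; this snippet is purely illustrative.

import pandas as pd

# Recent pandas exposes a 'future' options namespace (which includes
# future.no_silent_downcasting); on older releases the attribute is absent
# and the option_context call in photon.py would raise an OptionError.
if hasattr(pd.options, 'future'):
    print('pandas', pd.__version__, 'supports the future options used here')
else:
    print('pandas', pd.__version__, 'is too old; the export test is skipped')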
