diff --git a/mcdc/main.py b/mcdc/main.py
index 8311eaaa..822d07de 100644
--- a/mcdc/main.py
+++ b/mcdc/main.py
@@ -1224,20 +1224,60 @@ def dict_to_h5group(dict_, group):
         group[k] = v
 
 
+def dd_mergetally(mcdc, data):
+    """
+    Performs tally recombination on domain-decomposed mesh tallies.
+    Gathers and reorganizes tally data into a single array as it
+    would appear in a non-decomposed simulation.
+    """
+
+    tally = data[TALLY]
+    # create bin for recomposed tallies
+    d_Nx = input_deck.technique["dd_mesh"]["x"].size - 1
+    d_Ny = input_deck.technique["dd_mesh"]["y"].size - 1
+    d_Nz = input_deck.technique["dd_mesh"]["z"].size - 1
+
+    # capture tally lengths for reorganizing later
+    xlen = len(mcdc["mesh_tallies"][0]["filter"]["x"]) - 1
+    ylen = len(mcdc["mesh_tallies"][0]["filter"]["y"]) - 1
+    zlen = len(mcdc["mesh_tallies"][0]["filter"]["z"]) - 1
+
+    dd_tally = np.zeros((tally.shape[0], tally.shape[1] * d_Nx * d_Ny * d_Nz))
+    # gather tallies
+    for i, t in enumerate(tally):
+        MPI.COMM_WORLD.Gather(t, dd_tally[i], root=0)
+    if mcdc["mpi_master"]:
+        buff = np.zeros_like(dd_tally)
+        # reorganize tally data
+        # TODO: find/develop a more efficient algorithm for this
+        tally_idx = 0
+        for di in range(0, d_Nx * d_Ny * d_Nz):
+            dz = di // (d_Nx * d_Ny)
+            dy = (di % (d_Nx * d_Ny)) // d_Nx
+            dx = di % d_Nx
+            for xi in range(0, xlen):
+                for yi in range(0, ylen):
+                    for zi in range(0, zlen):
+                        # calculate reorganized (z-fastest) global index
+                        ind_x = xi * (ylen * d_Ny * zlen * d_Nz) + dx * (
+                            xlen * ylen * d_Ny * zlen * d_Nz
+                        )
+                        ind_y = yi * (zlen * d_Nz) + dy * (ylen * zlen * d_Nz)
+                        ind_z = zi + dz * zlen
+                        buff_idx = ind_x + ind_y + ind_z
+                        # place tally value in correct position
+                        buff[:, buff_idx] = dd_tally[:, tally_idx]
+                        tally_idx += 1
+        # replace old tally with reorganized tally
+        dd_tally = buff
+
+    return dd_tally
+
+
 def generate_hdf5(data, mcdc):
-    # recombine tallies before output processing
     if mcdc["technique"]["domain_decomposition"]:
-        tally = data[TALLY]
-        # create bin for recomposed tallies
-        d_Nx = input_deck.technique["dd_mesh"]["x"].size - 1
-        d_Ny = input_deck.technique["dd_mesh"]["y"].size - 1
-        d_Nz = input_deck.technique["dd_mesh"]["z"].size - 1
-        dd_tally = np.zeros((tally.shape[0], tally.shape[1] * d_Nx * d_Ny * d_Nz))
-        # gather tallies
-        MPI.COMM_WORLD.Gather(tally[0], dd_tally[0], root=0)
-        MPI.COMM_WORLD.Gather(tally[1], dd_tally[1], root=0)
-        MPI.COMM_WORLD.Gather(tally[2], dd_tally[2], root=0)
+        dd_tally = dd_mergetally(mcdc, data)
 
     if mcdc["mpi_master"]:
         if mcdc["setting"]["progress_bar"]:
diff --git a/test/regression/dd_cooper/answer.h5 b/test/regression/dd_cooper/answer.h5
new file mode 100644
index 00000000..c350ca68
Binary files /dev/null and b/test/regression/dd_cooper/answer.h5 differ
diff --git a/test/regression/dd_cooper/input.py b/test/regression/dd_cooper/input.py
new file mode 100644
index 00000000..f660971f
--- /dev/null
+++ b/test/regression/dd_cooper/input.py
@@ -0,0 +1,62 @@
+import numpy as np
+import mcdc
+
+
+# =============================================================================
+# Set model
+# =============================================================================
+# A shielding problem based on Problem 2 of [Cooper NSE 2001]
+# https://ans.tandfonline.com/action/showCitFormats?doi=10.13182/NSE00-34
+
+# Set materials
+SigmaT = 5.0
+c = 0.8
+m_barrier = mcdc.material(capture=np.array([SigmaT]), scatter=np.array([[SigmaT * c]]))
+SigmaT = 1.0
+m_room = mcdc.material(capture=np.array([SigmaT]), scatter=np.array([[SigmaT * c]]))
+
+# Set surfaces
+sx1 = mcdc.surface("plane-x", x=0.0, bc="reflective")
mcdc.surface("plane-x", x=0.0, bc="reflective") +sx2 = mcdc.surface("plane-x", x=2.0) +sx3 = mcdc.surface("plane-x", x=2.4) +sx4 = mcdc.surface("plane-x", x=4.0, bc="vacuum") +sy1 = mcdc.surface("plane-y", y=0.0, bc="reflective") +sy2 = mcdc.surface("plane-y", y=2.0) +sy3 = mcdc.surface("plane-y", y=4.0, bc="vacuum") +sz1 = mcdc.surface("plane-z", z=0.0, bc="reflective") +sz2 = mcdc.surface("plane-z", z=4.0, bc="vacuum") + +# Set cells +mcdc.cell(+sx1 & -sx2 & +sy1 & -sy2 & +sz1 & -sz2, m_room) +mcdc.cell(+sx1 & -sx4 & +sy2 & -sy3 & +sz1 & -sz2, m_room) +mcdc.cell(+sx3 & -sx4 & +sy1 & -sy2 & +sz1 & -sz2, m_room) +mcdc.cell(+sx2 & -sx3 & +sy1 & -sy2 & +sz1 & -sz2, m_barrier) + +# ============================================================================= +# Set source +# ============================================================================= +# Uniform isotropic source throughout the domain + +mcdc.source(x=[0.0, 1.0], y=[0.0, 1.0], z=[0.0, 1.0], isotropic=True) + +# ============================================================================= +# Set tally, setting, and run mcdc +# ============================================================================= + +mcdc.tally.mesh_tally( + scores=["flux"], + x=np.linspace(0.0, 4.0, 11), + y=np.linspace(0.0, 4.0, 11), + z=np.linspace(0.0, 4.0, 11), +) + +# Setting +mcdc.setting(N_particle=50) +mcdc.domain_decomposition( + x=np.linspace(0.0, 4.0, 3), y=np.linspace(0.0, 4.0, 3), z=np.linspace(0.0, 4.0, 3) +) + +mcdc.implicit_capture() + +# Run +mcdc.run() diff --git a/test/regression/run.py b/test/regression/run.py index d00ab196..cbee70a9 100644 --- a/test/regression/run.py +++ b/test/regression/run.py @@ -44,13 +44,20 @@ # Skip domain decomp tests unless there are 4 MPI processes temp = names.copy() for name in names: - if name[:3] == "dd_" and not (mpiexec == 4 or srun == 4): + if name == "dd_slab_reed" and not (mpiexec == 4 or srun == 4): temp.remove(name) print( Fore.YELLOW + "Note: Skipping %s (require 4 MPI ranks)" % name + Style.RESET_ALL ) + elif name == "dd_cooper" and not (mpiexec == 8 or srun == 8): + temp.remove(name) + print( + Fore.YELLOW + + "Note: Skipping %s (require 8 MPI ranks)" % name + + Style.RESET_ALL + ) names = temp # Skip iqmc if GPU run