
Commit

minor
leonfoks committed Jan 10, 2024
1 parent 98a497f commit 3a86f58
Showing 1 changed file with 9 additions and 9 deletions.
18 changes: 9 additions & 9 deletions geobipy/src/inversion/Inference3D.py
@@ -262,11 +262,11 @@ def create_hdf5(self, directory, **kwargs):
if self.parallel_access:
from mpi4py import MPI

- # Split off a master communicator.
+ # Split off a single core communicator.
single_rank_comm = self.world.Create(self.world.Get_group().Incl([0]))

if (single_rank_comm != MPI.COMM_NULL):
- # Instantiate a new blank inference3d linked to the master
+ # Instantiate a new blank inference3d linked to the head rank
inference3d = Inference3D(self.data, prng=Generator(PCG64DXSM()), world=single_rank_comm)
# Create the hdf5 files
inference3d._create_HDF5_dataset(directory, **kwargs)
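
For context on the hunk above, here is a minimal, self-contained mpi4py sketch of the same single-rank-communicator pattern; the variable names and the placeholder branch body are illustrative assumptions, not GeoBIPy's actual code.

from mpi4py import MPI

world = MPI.COMM_WORLD

# Build a group containing only rank 0, then a communicator from it.
# Comm.Create is collective, so every rank must call it; ranks outside
# the group receive MPI.COMM_NULL.
single_rank_comm = world.Create(world.Get_group().Incl([0]))

if single_rank_comm != MPI.COMM_NULL:
    # Only rank 0 reaches this branch; it can do the serial setup alone.
    pass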
@@ -469,14 +469,14 @@ def infer_mpi(self, **options):

t0 = MPI.Wtime()

- # Carryout the master-worker tasks
+ # Carryout the head-worker tasks
if (world.rank == 0):
self._infer_mpi_master_task(**options)
else:
self._infer_mpi_worker_task(**options)

def _infer_mpi_master_task(self, **options):
""" Define a Send Recv Send procedure on the master """
""" Define a Send Recv Send procedure on the head rank """

from mpi4py import MPI
from ..base import MPI as myMPI
@@ -509,7 +509,7 @@ def _infer_mpi_master_task(self, **options):
# Start a timer
t0 = MPI.Wtime()

myMPI.print("Initial data points sent. Master is now waiting for requests")
myMPI.print("Initial data points sent. Head rank is now waiting for requests")

# Now wait to send indices out to the workers as they finish until the entire data set is finished
while nFinished < nPoints:
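
The surrounding function implements a request/serve loop on the head rank. A hedged, simplified sketch of that pattern follows; the protocol details, names, and the None stop sentinel are assumptions for illustration, not the exact GeoBIPy protocol.

from mpi4py import MPI

def head_task(world, indices):
    # Serve one work index per incoming request until the list is exhausted.
    status = MPI.Status()
    for index in indices:
        world.recv(source=MPI.ANY_SOURCE, status=status)   # a 'requesting' ping
        world.send(index, dest=status.Get_source())
    # Each worker asks one final time; answer with a stop sentinel.
    for _ in range(world.size - 1):
        world.recv(source=MPI.ANY_SOURCE, status=status)
        world.send(None, dest=status.Get_source())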
@@ -583,7 +583,7 @@ def _infer_mpi_worker_task(self, **options):
myMPI.print("datapoint {} {} failed to converge".format(datapoint.lineNumber, datapoint.fiducial))
# save('Converge_fail_seed_{}_{}'.format(datapoint.lineNumber, datapoint.fiducial), inference.seed)

- # Ping the Master to request a new index
+ # Ping the head rank to request a new index
world.send('requesting', dest=0)

# Wait till you are told whether to continue or not
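
The matching worker side, again as a hedged sketch that pairs with the head_task sketch above; process is a stand-in for running the inversion on one data point.

def worker_task(world, process):
    # Keep asking the head rank for work until it replies with None.
    while True:
        world.send('requesting', dest=0)
        index = world.recv(source=0)
        if index is None:
            break
        process(index)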
@@ -1235,7 +1235,7 @@ def lineIndex(self, lineNumber=None, fiducial=None, index=None):
def loop_over(self, *args, **kwargs):
"""Generate a loop range.
- Tracks progress on the master rank only if parallel.
+ Tracks progress on the head rank only if parallel.
Parameters
----------
@@ -1320,7 +1320,7 @@ def fit_mixture_to_pdf_mpi(self, intervals=None, **kwargs):
mixture = mixPearson(a, a, a, a)
mixture.createHdf(hdf_file, 'fits', add_axis=(self.nPoints, self.lines[0].mesh.y.nCells))

- if rank == 0: ## Master Task
+ if rank == 0: ## Head rank
nFinished = 0
nSent = 0

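The createHdf call above pre-allocates space for every point's fit before the head/worker loop starts. A hedged sketch of the underlying parallel-HDF5 idea, assuming an MPI-enabled build of h5py; the file name, shapes, and dtype are illustrative, and GeoBIPy's own createHdf is not reproduced here.

import h5py
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
n_points, n_cells, n_params = 100, 50, 4   # illustrative sizes only

# Every rank opens the same file collectively and sees the same layout,
# so each worker can later write its own rows without coordination.
with h5py.File('fits.h5', 'w', driver='mpio', comm=comm) as hdf_file:
    hdf_file.create_dataset('fits', shape=(n_points, n_cells, n_params),
                            dtype=np.float64)
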
@@ -1339,7 +1339,7 @@ def fit_mixture_to_pdf_mpi(self, intervals=None, **kwargs):

t0 = MPI.Wtime()

myMPI.print("Initial posteriors sent. Master is now waiting for requests")
myMPI.print("Initial posteriors sent. Head rank is now waiting for requests")

# Now wait to send indices out to the workers as they finish until the entire data set is finished
while nFinished < self.nPoints:
