imbalance on init experiments
PhilipDeegan committed Jan 25, 2024
1 parent d301826 commit 912e0a8
Showing 11 changed files with 189 additions and 59 deletions.
16 changes: 12 additions & 4 deletions pyphare/pyphare/pharein/__init__.py
@@ -78,9 +78,7 @@ def __init__(self, fn):
self.fn = fn

def __call__(self, *xyz):
args = []
for i, arg in enumerate(xyz):
args.append(np.asarray(arg))
args = [np.asarray(arg) for arg in xyz]
ret = self.fn(*args)
if isinstance(ret, list):
ret = np.asarray(ret)
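
For orientation, a minimal sketch (not part of this commit) of what the wrapper above guarantees: whatever the caller passes as coordinates, the user function is evaluated on numpy arrays, and a list return value is converted back to an array. The density function below is an illustrative user function, not one from the repository.

import numpy as np

def density(x):
    # works for scalars, lists and arrays alike, since fn_wrapper applies np.asarray first
    return 1.0 + 0.1 * np.cos(x)

# fn_wrapper(density)(0.5) and fn_wrapper(density)([0.0, 0.5, 1.0]) would both
# hand density() a numpy array, so one user function covers both call styles.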
@@ -219,6 +217,8 @@ def as_paths(rb):
path = f"simulation/advanced/{k}"
if isinstance(v, int):
add_int(path, v)
elif isinstance(v, float):
add_double(path, v)
else:
add_string(path, v)

@@ -245,12 +245,20 @@ def as_paths(rb):
addInitFunction(partinit_path + "thermal_velocity_x", fn_wrapper(d["vthx"]))
addInitFunction(partinit_path + "thermal_velocity_y", fn_wrapper(d["vthy"]))
addInitFunction(partinit_path + "thermal_velocity_z", fn_wrapper(d["vthz"]))
add_int(partinit_path + "nbr_part_per_cell", d["nbrParticlesPerCell"])
add_double(partinit_path + "charge", d["charge"])
add_string(partinit_path + "basis", "cartesian")
if "init" in d and "seed" in d["init"]:
pp.add_optional_size_t(partinit_path + "init/seed", d["init"]["seed"])

if isinstance(d["nbrParticlesPerCell"], tuple):
addInitFunction(
partinit_path + "nbr_part_per_cell_fn",
fn_wrapper(d["nbrParticlesPerCell"][0]),
)
add_int(partinit_path + "nbr_part_per_cell", d["nbrParticlesPerCell"][1])
else:
add_int(partinit_path + "nbr_part_per_cell", d["nbrParticlesPerCell"])

add_string("simulation/electromag/name", "EM")
add_string("simulation/electromag/electric/name", "E")

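To make the new tuple form above concrete, here is a hedged Python sketch. Only the handling of d["nbrParticlesPerCell"] is shown in this diff, so the user-facing keyword spelling may differ; the point is that the value can stay a plain integer, or become a (function, fallback_int) pair where the function gives a per-cell particle count and the integer is still registered under "nbr_part_per_cell".

import numpy as np

def ppc_profile(x):
    # illustrative only: concentrate particles in a band around x = 75
    return np.where(np.abs(x - 75.0) < 10.0, 400, 100)

uniform_species  = {"nbrParticlesPerCell": 100}                  # previous behaviour
variable_species = {"nbrParticlesPerCell": (ppc_profile, 100)}   # new tuple form
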
@@ -90,9 +90,9 @@ void ConcreteLoadBalancerHybridStrategyNPPC<PHARE_T>::compute(

// TODO here, we have the lb_view value correctly set on all patches. we also know the id_
// so this is where we should call setWorkloadPatchDataIndex... which is a method of the
// CascadePartitioner lb_view is a local container containing the datz the loadbalancezrmanager
// knows the id, as well as the loadbalancerestimator and the loadbalancerestimator is a
// cascadpartotioner
// CascadePartitioner. lb_view is a local container containing the data; the LoadBalancerManager
// knows the id, as well as the LoadBalancerEstimator, and the LoadBalancerEstimator is a
// CascadePartitioner
}

} // namespace PHARE::amr
10 changes: 7 additions & 3 deletions src/amr/load_balancing/load_balancer_manager.hpp
@@ -37,9 +37,9 @@ class LoadBalancerManager
void addLoadBalancerEstimator(int const iLevel_min, int const iLevel_max,
std::shared_ptr<amr::LoadBalancerEstimator> lbe);

void addLoadBalancer(std::unique_ptr<SAMRAI::mesh::LoadBalanceStrategy> loadBalancer)
void setLoadBalancer(std::shared_ptr<SAMRAI::mesh::CascadePartitioner> loadBalancer)
{
loadBalancer_ = std::move(loadBalancer);
loadBalancer_ = loadBalancer;
loadBalancer_->setWorkloadPatchDataIndex(id_);
}

@@ -57,7 +57,7 @@ class LoadBalancerManager
int const id_;
int const maxLevelNumber_;
std::vector<std::shared_ptr<amr::LoadBalancerEstimator>> loadBalancerEstimators_;
std::unique_ptr<SAMRAI::mesh::LoadBalanceStrategy> loadBalancer_;
std::shared_ptr<SAMRAI::mesh::CascadePartitioner> loadBalancer_;
};


@@ -104,6 +104,10 @@ LoadBalancerManager<dim>::estimate(SAMRAI::hier::PatchLevel& level,
auto lbe = loadBalancerEstimators_[iLevel];

lbe->estimate(level, model);

// loadBalancer_->setWorkloadPatchDataIndex(id_, level.getLevelNumber());

// loadBalancer_->printStatistics(std::cout);

}

} // namespace PHARE::amr
53 changes: 53 additions & 0 deletions src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp
@@ -213,6 +213,52 @@ namespace amr
}


void
swap_L0_particles_if_allowed(std::shared_ptr<SAMRAI::hier::PatchHierarchy> const& hierarchy,
std::shared_ptr<SAMRAI::hier::PatchLevel> const& oldLevel,
IPhysicalModel& model, double const initDataTime)
{
using ParticleArray_t = typename HybridModel::particle_array_type;

assert(oldLevel->getLevelNumber() == 0);
auto const level = *hierarchy->getPatchLevel(0);

auto& hybridModel = static_cast<HybridModel&>(model);
auto& ions = hybridModel.state.ions;

bool single_patch_domains
= level.getNumberOfPatches() == 1 and oldLevel->getNumberOfPatches() == 1;

PHARE_LOG_LINE_STR(single_patch_domains);

if (single_patch_domains)
{
std::vector<ParticleArray_t*> old_domain, old_patch_ghost;
{
auto dataOnPatch = resourcesManager_->setOnPatch(**oldLevel->begin(), ions);
for (auto& pop : ions)
{
old_domain.push_back(&pop.domainParticles());
old_patch_ghost.push_back(&pop.patchGhostParticles());
}
}
auto dataOnPatch = resourcesManager_->setOnPatch(**level.begin(), ions);
std::size_t idx = 0;

PHARE_LOG_LINE_STR(old_domain.size());

for (auto& pop : ions)
{
PHARE_LOG_LINE_STR(pop.domainParticles().size());
PHARE_LOG_LINE_STR(old_domain[idx]->size());
pop.domainParticles() = std::move(*old_domain[idx]);
pop.patchGhostParticles() = std::move(*old_patch_ghost[idx]);
++idx;
}
}
else
patchGhostPartRefiners_.fill(0, initDataTime);
};

/**
* @brief regrid performs the regridding communications for Hybrid to Hybrid messengers
@@ -228,12 +274,19 @@

bool isRegriddingL0 = levelNumber == 0 and oldLevel;

// if (isRegriddingL0)
// swap_L0_particles_if_allowed(hierarchy, oldLevel, model, initDataTime);
// else
{
}

magneticInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime);
electricInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime);
domainParticlesRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime);

patchGhostPartRefiners_.fill(levelNumber, initDataTime);


// regridding will fill the new level wherever it has points that overlap
// old level. This will include its level border points.
// These new level border points will thus take values that were previous
2 changes: 0 additions & 2 deletions src/amr/messengers/refiner.hpp
@@ -164,8 +164,6 @@ class Refiner : private Communicator<RefinerTypes, ResourcesManager::dimension>
}
else
{
PHARE_LOG_LINE_STR(levelNumber << " " << hierarchy->getFinestLevelNumber() << " "
<< oldLevel);
algo->createSchedule(level, oldLevel, level->getNextCoarserHierarchyLevelNumber(),
hierarchy)
->fillData(initDataTime);
4 changes: 3 additions & 1 deletion src/amr/multiphysics_integrator.hpp
@@ -314,7 +314,7 @@ namespace solver
bool const isRegridding = oldLevel != nullptr;

PHARE_LOG_LINE_STR("init level " << levelNumber //
<< " with regriding = " << isRegridding
<< " with regridding = " << isRegridding
<< " with allocateData = " << allocateData
<< " with canBeRefined = " << canBeRefined
<< " with initialTime = " << initialTime);
@@ -369,6 +369,8 @@

solver.onRegrid();
}
else
load_balancer_manager_->estimate(*hierarchy->getPatchLevel(levelNumber), model);
}


13 changes: 4 additions & 9 deletions src/amr/wrappers/integrator.hpp
@@ -48,11 +48,11 @@ class Integrator

void initialize() { timeRefIntegrator_->initializeHierarchy(); }


Integrator(PHARE::initializer::PHAREDict const& dict,
std::shared_ptr<SAMRAI::hier::PatchHierarchy> hierarchy,
std::shared_ptr<SAMRAI::algs::TimeRefinementLevelStrategy> timeRefLevelStrategy,
std::shared_ptr<SAMRAI::mesh::StandardTagAndInitStrategy> tagAndInitStrategy,
std::shared_ptr<SAMRAI::mesh::CascadePartitioner> loadBalancer, //
double startTime, double endTime);

private:
@@ -80,23 +80,18 @@ Integrator<_dimension>::Integrator(
PHARE::initializer::PHAREDict const& dict,
std::shared_ptr<SAMRAI::hier::PatchHierarchy> hierarchy,
std::shared_ptr<SAMRAI::algs::TimeRefinementLevelStrategy> timeRefLevelStrategy,
std::shared_ptr<SAMRAI::mesh::StandardTagAndInitStrategy> tagAndInitStrategy, double startTime,
double endTime)
std::shared_ptr<SAMRAI::mesh::StandardTagAndInitStrategy> tagAndInitStrategy,
std::shared_ptr<SAMRAI::mesh::CascadePartitioner> loadBalancer, //
double startTime, double endTime)
: rebalance_coarsest{check_rebalance_coarsest(dict)}
{
auto loadBalancer_db = std::make_shared<SAMRAI::tbox::MemoryDatabase>("LoadBalancerDB");
loadBalancer_db->putDouble("flexible_load_tolerance", .75);
auto loadBalancer = std::make_shared<SAMRAI::mesh::CascadePartitioner>(
SAMRAI::tbox::Dimension{dimension}, "LoadBalancer");

loadBalancer->setSAMRAI_MPI(
SAMRAI::tbox::SAMRAI_MPI::getSAMRAIWorld()); // TODO Is it really needed ?

auto refineDB = getUserRefinementBoxesDatabase<dimension>(dict["simulation"]["AMR"]);
auto standardTag = std::make_shared<SAMRAI::mesh::StandardTagAndInitialize>(
"StandardTagAndInitialize", tagAndInitStrategy.get(), refineDB);


auto clustering = [&]() -> std::shared_ptr<SAMRAI::mesh::BoxGeneratorStrategy> {
if (!dict["simulation"]["AMR"].contains("clustering"))
throw std::runtime_error(std::string{"clustering type not specificed"});
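The constructor now receives the CascadePartitioner from its caller and records whether the coarsest level should be rebalanced via check_rebalance_coarsest(dict). Purely as a hypothetical sketch of the Python side, using placeholder key names that are not visible in this commit, the simulation/advanced routing added earlier would carry such a toggle as an int, float or string:

# Placeholder key names for illustration only; check_rebalance_coarsest()
# reads the real one, which this diff does not show.
advanced = {
    "loadbalancing.rebalance_coarsest": 1,    # hypothetical int option
    "loadbalancing.tolerance": 0.75,          # hypothetical float option
    "loadbalancing.mode": "nppc",             # hypothetical string option
}
for k, v in advanced.items():
    # as_paths() would route each value to add_int, add_double or add_string by type
    print(f"simulation/advanced/{k}", v)
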
@@ -43,11 +43,13 @@ class MaxwellianParticleInitializer : public ParticleInitializer<ParticleArray,
std::array<InputFunction, 3> const& thermalVelocity, double const particleCharge,
std::uint32_t const& nbrParticlesPerCell, std::optional<std::size_t> seed = {},
Basis const basis = Basis::Cartesian,
std::array<InputFunction, 3> const magneticField = {nullptr, nullptr, nullptr})
std::array<InputFunction, 3> const magneticField = {nullptr, nullptr, nullptr},

[Code scanning / CodeQL notice: large object passed by value. This parameter of type const array<function<..(..)>, 3UL> is 96 bytes; consider passing a const pointer/reference instead.]
InputFunction const ppc_by_icell = nullptr /*used if set */)
: density_{density}
, bulkVelocity_{bulkVelocity}
, thermalVelocity_{thermalVelocity}
, magneticField_{magneticField}
, ppc_by_icell_{ppc_by_icell}
, particleCharge_{particleCharge}
, nbrParticlePerCell_{nbrParticlesPerCell}
, basis_{basis}
@@ -84,6 +86,7 @@ class MaxwellianParticleInitializer : public ParticleInitializer<ParticleArray,
std::array<InputFunction, 3> bulkVelocity_;
std::array<InputFunction, 3> thermalVelocity_;
std::array<InputFunction, 3> magneticField_;
InputFunction ppc_by_icell_;

double particleCharge_;
std::uint32_t nbrParticlePerCell_;
@@ -96,10 +99,12 @@ class MaxwellianInitFunctions
{
public:
template<typename Function, typename FunctionArray, typename Basis, typename... Coords>
MaxwellianInitFunctions(Function& density, FunctionArray& bulkVelocity,
MaxwellianInitFunctions(Function& density, Function& ppc, FunctionArray& bulkVelocity,
FunctionArray& thermalVelocity, FunctionArray& magneticField,
Basis const& basis, Coords const&... coords)
: _n{density(coords...)}
, _ppc{ppc ? ppc(coords...) : nullptr}

{
static_assert(sizeof...(coords) <= 3, "can only provide up to 3 coordinates");
for (std::uint32_t i = 0; i < 3; i++)
@@ -114,7 +119,10 @@

NO_DISCARD std::array<double const*, 3> B() const { return ptrs(_B); }

NO_DISCARD auto operator()() const { return std::make_tuple(_n->data(), ptrs(_V), ptrs(_Vth)); }
NO_DISCARD auto operator()() const
{
return std::make_tuple(_n->data(), _ppc ? _ppc->data() : nullptr, ptrs(_V), ptrs(_Vth));
}

private:
NO_DISCARD std::array<double const*, 3>
@@ -123,7 +131,7 @@
return {v[0]->data(), v[1]->data(), v[2]->data()};
}

std::shared_ptr<PHARE::core::Span<double>> const _n;
std::shared_ptr<PHARE::core::Span<double>> const _n, _ppc;
std::array<std::shared_ptr<PHARE::core::Span<double>>, 3> _B, _V, _Vth;
};

@@ -166,19 +174,28 @@ void MaxwellianParticleInitializer<ParticleArray, GridLayout>::loadParticles(
return gridLayout.cellCenteredCoordinates(indexes...);
});

auto const fns = std::make_from_tuple<MaxwellianInitFunctions>(std::tuple_cat(
std::forward_as_tuple(density_, bulkVelocity_, thermalVelocity_, magneticField_, basis_),
cellCoords));
auto const fns = std::make_from_tuple<MaxwellianInitFunctions>(
std::tuple_cat(std::forward_as_tuple(density_, ppc_by_icell_, bulkVelocity_,
thermalVelocity_, magneticField_, basis_),
cellCoords));

auto const [n, V, Vth] = fns();
auto randGen = getRNG(rngSeed_);
auto const [n, ppc, V, Vth] = fns();
auto randGen = getRNG(rngSeed_);
ParticleDeltaDistribution<double> deltaDistrib;

for (std::size_t flatCellIdx = 0; flatCellIdx < ndCellIndices.size(); flatCellIdx++)
auto const& ppc_ = ppc; // alias: structured bindings cannot be captured by lambdas before C++20
auto ppc_for_icell = [&](auto const& ficell) {
if (ppc_by_icell_)
return static_cast<std::uint32_t>(ppc_[ficell]);
return nbrParticlePerCell_;
};

for (std::size_t flatCellIdx = 0; flatCellIdx < ndCellIndices.size(); ++flatCellIdx)
{
auto const ppc_per_cell = ppc_for_icell(flatCellIdx);
auto const cellWeight = n[flatCellIdx] / nbrParticlePerCell_;
auto const AMRCellIndex = layout.localToAMR(point(flatCellIdx, ndCellIndices));

auto const iCell = AMRCellIndex.template toArray<int>();
std::array<double, 3> particleVelocity;
std::array<std::array<double, 3>, 3> basis;

@@ -188,7 +205,7 @@
localMagneticBasis({B[0][flatCellIdx], B[1][flatCellIdx], B[2][flatCellIdx]}, basis);
}

for (std::uint32_t ipart = 0; ipart < nbrParticlePerCell_; ++ipart)
for (std::uint32_t ipart = 0; ipart < ppc_per_cell; ++ipart)
{
maxwellianVelocity({V[0][flatCellIdx], V[1][flatCellIdx], V[2][flatCellIdx]},
{Vth[0][flatCellIdx], Vth[1][flatCellIdx], Vth[2][flatCellIdx]}, //
@@ -197,8 +214,7 @@
if (basis_ == Basis::Magnetic)
particleVelocity = basisTransform(basis, particleVelocity);

particles.emplace_back(Particle{cellWeight, particleCharge_,
AMRCellIndex.template toArray<int>(),
particles.emplace_back(Particle{cellWeight, particleCharge_, iCell,
deltas(deltaDistrib, randGen), particleVelocity});
}
}
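Since the commit title is about imbalance at initialization, here is a rough illustration (not from the repository) of why a spatially varying PPC matters to the load balancer: with a uniform domain split, the initial particle count per patch, and hence the initial work, can differ by a large factor.

import numpy as np

cells = np.arange(100)                               # a 1D level with 100 cells
ppc = np.where(np.abs(cells - 75) < 10, 400, 100)    # dense band in the right half

left, right = ppc[:50].sum(), ppc[50:].sum()         # two equal-size patches
print(left, right, right / left)                     # 5000 10700 2.14 -> roughly 2x imbalance
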
@@ -47,7 +47,13 @@ namespace core

auto charge = dict["charge"].template to<double>();

auto nbrPartPerCell = dict["nbr_part_per_cell"].template to<int>();
auto nbrPartPerCell = initializer::dict_get(dict, "nbr_part_per_cell", int{0});
FunctionType nbrPartPerCellFn
= initializer::dict_get(dict, "nbr_part_per_cell_fn", FunctionType{nullptr});
if (not nbrPartPerCellFn and nbrPartPerCell == 0)
{
throw std::runtime_error("PPC cannot be 0");
}

auto basisName = dict["basis"].template to<std::string>();

@@ -59,22 +65,25 @@
if (dict.contains("init") && dict["init"].contains("seed"))
seed = dict["init"]["seed"].template to<std::optional<std::size_t>>();

std::array<FunctionType, 3> magneticField = {nullptr, nullptr, nullptr};

if (basisName == "cartesian")
{
return std::make_unique<
MaxwellianParticleInitializer<ParticleArray, GridLayout>>(
density, v, vth, charge, nbrPartPerCell, seed);
density, v, vth, charge, nbrPartPerCell, seed, Basis::Cartesian,
magneticField, nbrPartPerCellFn);
}
else if (basisName == "magnetic")
{
[[maybe_unused]] Basis basis = Basis::Magnetic;
[[maybe_unused]] auto& bx = dict["magnetic_x"].template to<FunctionType>();
[[maybe_unused]] auto& by = dict["magnetic_x"].template to<FunctionType>();
[[maybe_unused]] auto& bz = dict["magnetic_x"].template to<FunctionType>();
magneticField[0] = dict["magnetic_x"].template to<FunctionType>();
magneticField[1] = dict["magnetic_x"].template to<FunctionType>();
magneticField[2] = dict["magnetic_x"].template to<FunctionType>();

return std::make_unique<
MaxwellianParticleInitializer<ParticleArray, GridLayout>>(
density, v, vth, charge, nbrPartPerCell, seed);
density, v, vth, charge, nbrPartPerCell, seed, Basis::Magnetic,
magneticField, nbrPartPerCellFn);
}
}
// TODO throw?
