Skip to content

Commit

Permalink
imbalance on init experiments
Browse files Browse the repository at this point in the history
  • Loading branch information
PhilipDeegan committed Jan 26, 2024
1 parent d301826 commit b761c8c
Show file tree
Hide file tree
Showing 11 changed files with 214 additions and 85 deletions.
16 changes: 12 additions & 4 deletions pyphare/pyphare/pharein/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,9 +78,7 @@ def __init__(self, fn):
self.fn = fn

def __call__(self, *xyz):
args = []
for i, arg in enumerate(xyz):
args.append(np.asarray(arg))
args = [np.asarray(arg) for arg in xyz]
ret = self.fn(*args)
if isinstance(ret, list):
ret = np.asarray(ret)
Expand Down Expand Up @@ -219,6 +217,8 @@ def as_paths(rb):
path = f"simulation/advanced/{k}"
if isinstance(v, int):
add_int(path, v)
elif isinstance(v, float):
add_double(path, v)
else:
add_string(path, v)

Expand All @@ -245,12 +245,20 @@ def as_paths(rb):
addInitFunction(partinit_path + "thermal_velocity_x", fn_wrapper(d["vthx"]))
addInitFunction(partinit_path + "thermal_velocity_y", fn_wrapper(d["vthy"]))
addInitFunction(partinit_path + "thermal_velocity_z", fn_wrapper(d["vthz"]))
add_int(partinit_path + "nbr_part_per_cell", d["nbrParticlesPerCell"])
add_double(partinit_path + "charge", d["charge"])
add_string(partinit_path + "basis", "cartesian")
if "init" in d and "seed" in d["init"]:
pp.add_optional_size_t(partinit_path + "init/seed", d["init"]["seed"])

if isinstance(d["nbrParticlesPerCell"], tuple):
addInitFunction(
partinit_path + "nbr_part_per_cell_fn",
fn_wrapper(d["nbrParticlesPerCell"][0]),
)
add_int(partinit_path + "nbr_part_per_cell", d["nbrParticlesPerCell"][1])
else:
add_int(partinit_path + "nbr_part_per_cell", d["nbrParticlesPerCell"])

add_string("simulation/electromag/name", "EM")
add_string("simulation/electromag/electric/name", "E")

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -90,9 +90,9 @@ void ConcreteLoadBalancerHybridStrategyNPPC<PHARE_T>::compute(

// TODO here, we have the lb_view value correctly set on all patches. we also know the id_
// so this is where we should call setWorkloadPatchDataIndex... which is a method of the
// CascadePartitioner lb_view is a local container containing the datz the loadbalancezrmanager
// knows the id, as well as the loadbalancerestimator and the loadbalancerestimator is a
// cascadpartotioner
// CascadePartitioner lb_view is a local container containing the data the LoadBalancerManager
// knows the id, as well as the LoadBalancerEstimator and the LoadBalancerEstimator is a
// CascadePartitioner
}

} // namespace PHARE::amr
Expand Down
43 changes: 16 additions & 27 deletions src/amr/load_balancing/load_balancer_manager.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,9 +12,10 @@
#include "load_balancer_estimator.hpp"



namespace PHARE::amr
{


template<std::size_t dim>
class LoadBalancerManager
{
Expand All @@ -32,14 +33,14 @@ class LoadBalancerManager

~LoadBalancerManager() { variableDatabase_->removeVariable("LoadBalancerVariable"); };

int getId() const;
int getId() const { return id_; }

void addLoadBalancerEstimator(int const iLevel_min, int const iLevel_max,
std::shared_ptr<amr::LoadBalancerEstimator> lbe);

void addLoadBalancer(std::unique_ptr<SAMRAI::mesh::LoadBalanceStrategy> loadBalancer)
void setLoadBalancer(std::shared_ptr<SAMRAI::mesh::CascadePartitioner> loadBalancer)
{
loadBalancer_ = std::move(loadBalancer);
loadBalancer_ = loadBalancer;
loadBalancer_->setWorkloadPatchDataIndex(id_);
}

Expand All @@ -57,27 +58,17 @@ class LoadBalancerManager
int const id_;
int const maxLevelNumber_;
std::vector<std::shared_ptr<amr::LoadBalancerEstimator>> loadBalancerEstimators_;
std::unique_ptr<SAMRAI::mesh::LoadBalanceStrategy> loadBalancer_;
std::shared_ptr<SAMRAI::mesh::CascadePartitioner> loadBalancer_;
};




template<std::size_t dim>
inline int LoadBalancerManager<dim>::getId() const
void LoadBalancerManager<dim>::addLoadBalancerEstimator(
int const iLevel_min, int const iLevel_max, std::shared_ptr<amr::LoadBalancerEstimator> lbe)
{
return id_;
}




template<std::size_t dim>
inline void
LoadBalancerManager<dim>::addLoadBalancerEstimator(int const iLevel_min, int const iLevel_max,
std::shared_ptr<amr::LoadBalancerEstimator> lbe)
{
for (auto ilevel = iLevel_min; ilevel <= iLevel_max; ilevel++)
for (auto ilevel = iLevel_min; ilevel <= iLevel_max; ++ilevel)
{
loadBalancerEstimators_[ilevel] = lbe;
}
Expand All @@ -87,25 +78,23 @@ LoadBalancerManager<dim>::addLoadBalancerEstimator(int const iLevel_min, int con


template<std::size_t dim>
inline void LoadBalancerManager<dim>::allocate(SAMRAI::hier::Patch& patch,
double const allocateTime)
void LoadBalancerManager<dim>::allocate(SAMRAI::hier::Patch& patch, double const allocateTime)
{
patch.allocatePatchData(id_, allocateTime);
}



template<std::size_t dim>
inline void
LoadBalancerManager<dim>::estimate(SAMRAI::hier::PatchLevel& level,
PHARE::solver::IPhysicalModel<PHARE::amr::SAMRAI_Types>& model)
void LoadBalancerManager<dim>::estimate(
SAMRAI::hier::PatchLevel& level, PHARE::solver::IPhysicalModel<PHARE::amr::SAMRAI_Types>& model)
{
auto iLevel = level.getLevelNumber();
auto lbe = loadBalancerEstimators_[iLevel];

lbe->estimate(level, model);
if (auto lbe = loadBalancerEstimators_[level.getLevelNumber()])
lbe->estimate(level, model);
}


} // namespace PHARE::amr


#endif
53 changes: 53 additions & 0 deletions src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -213,6 +213,52 @@ namespace amr
}


/// @brief After a level-0 regrid, when both the old and the new coarsest level
/// consist of exactly one patch, move the old level-0 particles directly into
/// the new level's ion populations instead of re-communicating them; otherwise
/// fall back to refilling patch-ghost particles through the refiner pool.
/// NOTE(review): the single-patch-on-both-sides condition presumably guarantees
/// the old and new domain boxes coincide, making a straight move valid — confirm.
/// @param hierarchy    hierarchy holding the freshly regridded level 0
/// @param oldLevel     pre-regrid coarsest level; must be level 0 (asserted)
/// @param model        physical model, cast to HybridModel to reach the ion state
/// @param initDataTime time passed to the ghost-particle fill on the fallback path
void
swap_L0_particles_if_allowed(std::shared_ptr<SAMRAI::hier::PatchHierarchy> const& hierarchy,
std::shared_ptr<SAMRAI::hier::PatchLevel> const& oldLevel,
IPhysicalModel& model, double const initDataTime)
{
using ParticleArray_t = typename HybridModel::particle_array_type;

// This routine only makes sense when it is the coarsest level being regridded.
assert(oldLevel->getLevelNumber() == 0);
auto const level = *hierarchy->getPatchLevel(0);

auto& hybridModel = static_cast<HybridModel&>(model);
auto& ions = hybridModel.state.ions;

// The fast path requires a single patch on both the old and the new level.
bool single_patch_domains
= level.getNumberOfPatches() == 1 and oldLevel->getNumberOfPatches() == 1;

PHARE_LOG_LINE_STR(single_patch_domains);

if (single_patch_domains)
{
// Collect pointers to the old level's particle arrays while the
// resource manager has the OLD patch's data set on the ion views.
std::vector<ParticleArray_t*> old_domain, old_patch_ghost;
{
// Scoped: setOnPatch binds ions to the old patch only within this block.
auto dataOnPatch = resourcesManager_->setOnPatch(**oldLevel->begin(), ions);
for (auto& pop : ions)
{
old_domain.push_back(&pop.domainParticles());
old_patch_ghost.push_back(&pop.patchGhostParticles());
}
}
// Now bind ions to the NEW patch and move the saved arrays in.
auto dataOnPatch = resourcesManager_->setOnPatch(**level.begin(), ions);
std::size_t idx = 0;

PHARE_LOG_LINE_STR(old_domain.size());

// Populations are assumed to iterate in the same order as above,
// so old_domain[idx]/old_patch_ghost[idx] match pop — TODO confirm.
for (auto& pop : ions)
{
PHARE_LOG_LINE_STR(pop.domainParticles().size());
PHARE_LOG_LINE_STR(old_domain[idx]->size());
pop.domainParticles() = std::move(*old_domain[idx]);
pop.patchGhostParticles() = std::move(*old_patch_ghost[idx]);
++idx;
}
}
else
// Fallback: schedule-based fill of level-0 patch-ghost particles.
patchGhostPartRefiners_.fill(0, initDataTime);
};

/**
* @brief regrid performs the regriding communications for Hybrid to Hybrid messengers
Expand All @@ -228,12 +274,19 @@ namespace amr

bool isRegriddingL0 = levelNumber == 0 and oldLevel;

// if (isRegriddingL0)
// swap_L0_particles_if_allowed(hierarchy, oldLevel, model, initDataTime);
// else
{
}

magneticInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime);
electricInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime);
domainParticlesRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime);

patchGhostPartRefiners_.fill(levelNumber, initDataTime);


// regriding will fill the new level wherever it has points that overlap
// old level. This will include its level border points.
// These new level border points will thus take values that where previous
Expand Down
2 changes: 0 additions & 2 deletions src/amr/messengers/refiner.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -164,8 +164,6 @@ class Refiner : private Communicator<RefinerTypes, ResourcesManager::dimension>
}
else
{
PHARE_LOG_LINE_STR(levelNumber << " " << hierarchy->getFinestLevelNumber() << " "
<< oldLevel);
algo->createSchedule(level, oldLevel, level->getNextCoarserHierarchyLevelNumber(),
hierarchy)
->fillData(initDataTime);
Expand Down
4 changes: 3 additions & 1 deletion src/amr/multiphysics_integrator.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -314,7 +314,7 @@ namespace solver
bool const isRegridding = oldLevel != nullptr;

PHARE_LOG_LINE_STR("init level " << levelNumber //
<< " with regriding = " << isRegridding
<< " with regridding = " << isRegridding
<< " with allocateData = " << allocateData
<< " with canBeRefined = " << canBeRefined
<< " with initialTime = " << initialTime);
Expand Down Expand Up @@ -369,6 +369,8 @@ namespace solver

solver.onRegrid();
}
else
load_balancer_manager_->estimate(*hierarchy->getPatchLevel(levelNumber), model);
}


Expand Down
16 changes: 8 additions & 8 deletions src/amr/wrappers/integrator.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -48,11 +48,11 @@ class Integrator

void initialize() { timeRefIntegrator_->initializeHierarchy(); }


Integrator(PHARE::initializer::PHAREDict const& dict,
std::shared_ptr<SAMRAI::hier::PatchHierarchy> hierarchy,
std::shared_ptr<SAMRAI::algs::TimeRefinementLevelStrategy> timeRefLevelStrategy,
std::shared_ptr<SAMRAI::mesh::StandardTagAndInitStrategy> tagAndInitStrategy,
std::shared_ptr<SAMRAI::mesh::CascadePartitioner> loadBalancer, //
double startTime, double endTime);

private:
Expand Down Expand Up @@ -80,14 +80,15 @@ Integrator<_dimension>::Integrator(
PHARE::initializer::PHAREDict const& dict,
std::shared_ptr<SAMRAI::hier::PatchHierarchy> hierarchy,
std::shared_ptr<SAMRAI::algs::TimeRefinementLevelStrategy> timeRefLevelStrategy,
std::shared_ptr<SAMRAI::mesh::StandardTagAndInitStrategy> tagAndInitStrategy, double startTime,
double endTime)
std::shared_ptr<SAMRAI::mesh::StandardTagAndInitStrategy> tagAndInitStrategy,
std::shared_ptr<SAMRAI::mesh::CascadePartitioner> loadBalancer, //
double startTime, double endTime)
: rebalance_coarsest{check_rebalance_coarsest(dict)}
{
auto loadBalancer_db = std::make_shared<SAMRAI::tbox::MemoryDatabase>("LoadBalancerDB");
loadBalancer_db->putDouble("flexible_load_tolerance", .75);
auto loadBalancer = std::make_shared<SAMRAI::mesh::CascadePartitioner>(
SAMRAI::tbox::Dimension{dimension}, "LoadBalancer");
// auto loadBalancer_db = std::make_shared<SAMRAI::tbox::MemoryDatabase>("LoadBalancerDB");
// loadBalancer_db->putDouble("flexible_load_tolerance", .75);
// auto loadBalancer = std::make_shared<SAMRAI::mesh::CascadePartitioner>(
// SAMRAI::tbox::Dimension{dimension}, "LoadBalancer");

Check notice

Code scanning / CodeQL

Commented-out code Note

This comment appears to contain commented-out code.

loadBalancer->setSAMRAI_MPI(
SAMRAI::tbox::SAMRAI_MPI::getSAMRAIWorld()); // TODO Is it really needed ?
Expand All @@ -96,7 +97,6 @@ Integrator<_dimension>::Integrator(
auto standardTag = std::make_shared<SAMRAI::mesh::StandardTagAndInitialize>(
"StandardTagAndInitialize", tagAndInitStrategy.get(), refineDB);


auto clustering = [&]() -> std::shared_ptr<SAMRAI::mesh::BoxGeneratorStrategy> {
if (!dict["simulation"]["AMR"].contains("clustering"))
throw std::runtime_error(std::string{"clustering type not specificed"});
Expand Down
Loading

0 comments on commit b761c8c

Please sign in to comment.