Load balancer python registerable object with defaults
PhilipDeegan committed Feb 16, 2024
1 parent 1a6366f commit 4c1b83b
Showing 9 changed files with 162 additions and 125 deletions.
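The headline change: a `LoadBalancer` dataclass in `pyphare.pharein` that registers itself on the current simulation and serializes its settings under `simulation/AMR/loadbalancing`, replacing the old `simulation/advanced/*` integrator flags. A minimal usage sketch, assuming a typical `pyphare.pharein` setup (the `Simulation` kwargs are elided):

```python
import pyphare.pharein as ph

sim = ph.Simulation(
    # usual domain/time/AMR kwargs elided
)

# attaches itself to `sim` via __post_init__; constructing a second one raises
ph.LoadBalancer(active=True, auto=True, mode="nppc", tol=0.05)
```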
24 changes: 14 additions & 10 deletions pyphare/pyphare/pharein/__init__.py
@@ -43,6 +43,7 @@
serialize as serialize_sim,
deserialize as deserialize_sim,
)
from .load_balancer import LoadBalancer


def getSimulation():
@@ -118,6 +119,9 @@ def populateDict():
def add_int(path, val):
pp.add_int(path, int(val))

def add_bool(path, val):
pp.add_bool(path, bool(val))

def add_double(path, val):
pp.add_double(path, float(val))

@@ -172,8 +176,6 @@ def add_vector_int(path, val):

add_int("simulation/AMR/tag_buffer", simulation.tag_buffer)

add_string("simulation/AMR/loadbalancing", simulation.loadbalancing)

refinement_boxes = simulation.refinement_boxes

def as_paths(rb):
@@ -213,14 +215,16 @@ def as_paths(rb):
add_double("simulation/algo/ohm/resistivity", simulation.resistivity)
add_double("simulation/algo/ohm/hyper_resistivity", simulation.hyper_resistivity)

for k, v in simulation.advanced.items():
path = f"simulation/advanced/{k}"
if isinstance(v, int):
add_int(path, v)
elif isinstance(v, float):
add_double(path, v)
else:
add_string(path, v)
# load balancer block start
lb = simulation.load_balancer or LoadBalancer(_register=False)
base = "simulation/AMR/loadbalancing"
add_bool(f"{base}/auto", lb.auto)
add_bool(f"{base}/active", lb.active)
add_bool(f"{base}/on_init", lb.on_init)
add_size_t(f"{base}/every", lb.every)
add_string(f"{base}/mode", lb.mode)
add_double(f"{base}/tolerance", lb.tol)
# load balancer block end

init_model = simulation.model
modelDict = init_model.model_dict
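Because `populateDict` falls back to a non-registered `LoadBalancer(_register=False)`, the `simulation/AMR/loadbalancing` block is always written and the C++ side can rely on the keys existing. Illustratively, the keys and the values they take when no balancer is registered (defaults from the dataclass in the next file):

```python
# keys written by populateDict, with dataclass defaults
{
    "simulation/AMR/loadbalancing/auto": True,
    "simulation/AMR/loadbalancing/active": False,
    "simulation/AMR/loadbalancing/on_init": True,
    "simulation/AMR/loadbalancing/every": 1,
    "simulation/AMR/loadbalancing/mode": "nppc",
    "simulation/AMR/loadbalancing/tolerance": 0.05,
}
```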
45 changes: 45 additions & 0 deletions pyphare/pyphare/pharein/load_balancer.py
@@ -0,0 +1,45 @@
#
#

from dataclasses import dataclass, field
from . import global_vars as gv


@dataclass
class LoadBalancer:
# whether or not load balancing is performed
active: bool = field(default_factory=lambda: False)

# how load is assessed
mode: str = field(default_factory=lambda: "nppc")

# acceptable load imbalance fraction
tol: float = field(default_factory=lambda: 0.05)

# if auto (and active), the values below are ignored
auto: bool = field(default_factory=lambda: True)

# if not auto, these values are used when active
on_init: bool = field(default_factory=lambda: True)
every: int = field(default_factory=lambda: 1)

# internal: allows creating an instance without registering it, for default init
_register: bool = field(default_factory=lambda: True)

def __post_init__(self):
allowed_modes = [
"nppc", # count particles per rank
"homogeneous", # count cells per rank
]

if self.mode not in allowed_modes:
raise RuntimeError(f"LoadBalancer mode '{self.mode}' is not valid")

if self._register:
if not gv.sim:
raise RuntimeError(
f"LoadBalancer cannot be registered as no simulation exists"
)
if gv.sim.load_balancer:
raise RuntimeError(f"LoadBalancer is already registered to simulation")
gv.sim.load_balancer = self
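Registration is one-shot: constructing a `LoadBalancer` while a simulation exists attaches it to `gv.sim`, a second construction raises, and `_register=False` is the internal escape hatch used by `populateDict` to materialize defaults without touching the simulation. A short sketch of those guard rails, assuming a simulation has already been created:

```python
from pyphare.pharein.load_balancer import LoadBalancer

lb = LoadBalancer(active=True)  # registers as gv.sim.load_balancer

try:
    LoadBalancer(active=False)  # second registration is rejected
except RuntimeError as e:
    print(e)  # LoadBalancer is already registered to simulation

defaults = LoadBalancer(_register=False)  # never touches gv.sim
```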
1 change: 1 addition & 0 deletions pyphare/pyphare/pharein/simulation.py
@@ -840,6 +840,7 @@ def __init__(self, **kwargs):
self.diagnostics = {}
self.model = None
self.electrons = None
self.load_balancer = None

# hard coded in C++ MultiPhysicsIntegrator::getMaxFinerLevelDt
self.nSubcycles = 4
36 changes: 36 additions & 0 deletions src/amr/load_balancing/load_balancer_details.hpp
@@ -0,0 +1,36 @@
#ifndef PHARE_AMR_LOAD_BALANCER_LOAD_BALANCER_DETAILS_HPP
#define PHARE_AMR_LOAD_BALANCER_LOAD_BALANCER_DETAILS_HPP

#include <string>
#include <cstdint>

#include "core/logger.hpp"
#include "initializer/data_provider.hpp"

namespace PHARE::amr
{
struct LoadBalancerDetails
{
bool const active = false;
bool const automatic = false;
bool const on_init = false;

std::size_t const every = 0;
std::string const mode;

double const tolerance = .05;

LoadBalancerDetails static FROM(initializer::PHAREDict const& dict)
{
return {cppdict::get_value(dict, "active", false),
cppdict::get_value(dict, "auto", false),
cppdict::get_value(dict, "on_init", false),
cppdict::get_value(dict, "every", std::size_t{0}),
cppdict::get_value(dict, "mode", std::string{"nppc"}),
cppdict::get_value(dict, "tol", 0.05)};
}
};

} // namespace PHARE::amr

#endif /* PHARE_AMR_LOAD_BALANCER_LOAD_BALANCER_DETAILS_HPP */
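`FROM` mirrors the Python side key by key, with `cppdict::get_value` falling back to the struct's defaults whenever a key is missing. A Python analogue of that lookup-with-default behavior (the function name is illustrative, not part of the C++ API):

```python
# Python analogue of LoadBalancerDetails::FROM: each key falls back to a default
def load_balancer_details(dct):
    return {
        "active": dct.get("active", False),
        "automatic": dct.get("auto", False),
        "on_init": dct.get("on_init", False),
        "every": dct.get("every", 0),
        "mode": dct.get("mode", "nppc"),
        "tolerance": dct.get("tolerance", 0.05),
    }
```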
129 changes: 45 additions & 84 deletions src/amr/wrappers/integrator.hpp
@@ -24,48 +24,22 @@

#include "initializer/data_provider.hpp"

#include "amr/load_balancing/load_balancer_details.hpp"


namespace PHARE::amr
{

template<std::size_t _dimension>
class Integrator
{
int static constexpr rebalance_coarsest_every_default = 1000;

bool static _rebalance_coarsest(initializer::PHAREDict const& dict)
{
return cppdict::get_value(dict, "simulation/advanced/integrator/rebalance_coarsest", 0) > 0;
}

bool static _rebalance_coarsest_on_init(initializer::PHAREDict const& dict)
{
return cppdict::get_value(dict, "simulation/advanced/integrator/rebalance_coarsest_on_init",
0)
> 0;
}

std::size_t static _rebalance_coarsest_every(initializer::PHAREDict const& dict)
{
auto in
= cppdict::get_value(dict, "simulation/advanced/integrator/rebalance_coarsest_every",
rebalance_coarsest_every_default);
if (in < 0)
throw std::runtime_error("rebalance_coarsest_every must be positive");
return static_cast<std::size_t>(in);
}

bool static _is_tagging_refinement(initializer::PHAREDict const& dict)
{
return cppdict::get_value(dict, "simulation/AMR/refinement/tagging/method",
std::string{"none"})
== std::string{"auto"};
}

bool static _rebalance_coarsest_auto(initializer::PHAREDict const& dict)
{
return cppdict::get_value(dict, "simulation/advanced/integrator/rebalance_coarsest_auto", 0)
> 0;
}

public:
static constexpr std::size_t dimension = _dimension;
@@ -74,11 +48,6 @@ class Integrator
{
bool rebalance_coarsest_now = _should_rebalance_now();

PHARE_LOG_LINE_STR(is_tagging_refinement
<< " " << time_step_idx << " " << rebalance_coarsest << " "
<< rebalance_coarsest_on_init << " " << rebalance_coarsest_every << " "
<< rebalance_coarsest_now);

auto new_time = timeRefIntegrator_->advanceHierarchy(dt, rebalance_coarsest_now);
++time_step_idx;
return new_time;
@@ -91,18 +60,15 @@
std::shared_ptr<SAMRAI::algs::TimeRefinementLevelStrategy> timeRefLevelStrategy,
std::shared_ptr<SAMRAI::mesh::StandardTagAndInitStrategy> tagAndInitStrategy,
std::shared_ptr<SAMRAI::mesh::CascadePartitioner> loadBalancer, //
double startTime, double endTime, int loadBalancerPatchId);
double startTime, double endTime, amr::LoadBalancerDetails const& lb_info,
int loadBalancerPatchId);

private:
amr::LoadBalancerDetails const lb_info_;
bool const is_tagging_refinement = false;
bool const rebalance_coarsest = false;
bool const rebalance_coarsest_on_init = false;
bool const rebalance_coarsest_auto = false;
int loadBalancerPatchId_ = -1;
std::size_t rebalance_coarsest_auto_back_off = 0;
std::size_t time_step_idx = 0;
std::size_t const rebalance_coarsest_every = rebalance_coarsest_every_default;
int loadBalancerPatchId_ = -1;
double loadTolerance_ = .05;
std::size_t rebalance_coarsest_auto_back_off_by = 1;


@@ -123,40 +89,7 @@ class Integrator
return load;
}

bool _should_rebalance_now()
{
if (is_tagging_refinement and rebalance_coarsest)
{
if (rebalance_coarsest_auto)
{
if (rebalance_coarsest_auto_back_off == 0)
{
auto workLoads = core::mpi::collect(computeNonUniformWorkLoadForLevel0());

auto max_value = *std::max_element(workLoads.begin(), workLoads.end());
for (auto& workload : workLoads)
workload /= max_value;
auto min_value = *std::min_element(workLoads.begin(), workLoads.end());
if ((1 - min_value) > loadTolerance_)
{
rebalance_coarsest_auto_back_off_by = 8;
rebalance_coarsest_auto_back_off = rebalance_coarsest_auto_back_off_by;
return true;
}

rebalance_coarsest_auto_back_off_by *= 2;
rebalance_coarsest_auto_back_off = rebalance_coarsest_auto_back_off_by;
}
else
--rebalance_coarsest_auto_back_off;
}
else // maybe redundant with above calculations
return ((time_step_idx == 0 and rebalance_coarsest_on_init)
or (time_step_idx > 0 and rebalance_coarsest_every > 0
and time_step_idx % rebalance_coarsest_every == 0));
}
return false;
}
bool _should_rebalance_now();
};


@@ -180,16 +113,11 @@ Integrator<_dimension>::Integrator(
std::shared_ptr<SAMRAI::hier::PatchHierarchy> hierarchy,
std::shared_ptr<SAMRAI::algs::TimeRefinementLevelStrategy> timeRefLevelStrategy,
std::shared_ptr<SAMRAI::mesh::StandardTagAndInitStrategy> tagAndInitStrategy,
std::shared_ptr<SAMRAI::mesh::CascadePartitioner> loadBalancer, //
double startTime, double endTime, int loadBalancerPatchId)
: is_tagging_refinement{_is_tagging_refinement(dict)}
, rebalance_coarsest{_rebalance_coarsest(dict)}
, rebalance_coarsest_on_init{_rebalance_coarsest_on_init(dict)}
, rebalance_coarsest_auto{_rebalance_coarsest_auto(dict)}
, rebalance_coarsest_every{_rebalance_coarsest_every(dict)}
std::shared_ptr<SAMRAI::mesh::CascadePartitioner> loadBalancer, double startTime,
double endTime, amr::LoadBalancerDetails const& lb_info, int loadBalancerPatchId)
: lb_info_{lb_info}
, is_tagging_refinement{_is_tagging_refinement(dict)}
, loadBalancerPatchId_{loadBalancerPatchId}
, loadTolerance_{
cppdict::get_value(dict, "simulation/advanced/integrator/flexible_load_tolerance", .05)}
{
loadBalancer->setSAMRAI_MPI(
SAMRAI::tbox::SAMRAI_MPI::getSAMRAIWorld()); // TODO Is it really needed ?
@@ -240,8 +168,41 @@
"TimeRefinementIntegrator", db, hierarchy, timeRefLevelStrategy, gridding);
}

template<std::size_t _dimension>
bool Integrator<_dimension>::_should_rebalance_now()
{
if (is_tagging_refinement and lb_info_.active)
{
if (lb_info_.automatic)
{
if (rebalance_coarsest_auto_back_off == 0)
{
auto workLoads = core::mpi::collect(computeNonUniformWorkLoadForLevel0());

auto max_value = *std::max_element(workLoads.begin(), workLoads.end());
for (auto& workload : workLoads)
workload /= max_value;
auto min_value = *std::min_element(workLoads.begin(), workLoads.end());
if ((1 - min_value) > lb_info_.tolerance)
{
rebalance_coarsest_auto_back_off_by = 8; // todo decide final approach
rebalance_coarsest_auto_back_off = rebalance_coarsest_auto_back_off_by;
return true;
}

rebalance_coarsest_auto_back_off_by *= 2;
rebalance_coarsest_auto_back_off = rebalance_coarsest_auto_back_off_by;
}
else
--rebalance_coarsest_auto_back_off;
}
else
return ((time_step_idx == 0 and lb_info_.on_init)
or (time_step_idx > 0 and lb_info_.every > 0
and time_step_idx % lb_info_.every == 0));
}
return false;
}

template<std::size_t dimension>
std::shared_ptr<SAMRAI::tbox::MemoryDatabase>
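The auto path in `_should_rebalance_now` is an exponential back-off: every balanced check doubles how long to wait before checking again, while an actual rebalance resets the wait to a fixed 8 steps. A small Python model of the schedule (a sketch; the real code gathers per-rank workloads via `core::mpi::collect`):

```python
# minimal model of the back-off schedule in Integrator::_should_rebalance_now
class RebalanceSchedule:
    def __init__(self, tolerance=0.05):
        self.tolerance = tolerance
        self.back_off = 0     # steps left before the next imbalance check
        self.back_off_by = 1  # doubles after every balanced check

    def should_rebalance(self, workloads):
        if self.back_off == 0:
            mx = max(workloads)
            imbalance = 1 - min(w / mx for w in workloads)  # 0 means balanced
            if imbalance > self.tolerance:
                self.back_off_by = 8  # fixed reset after triggering a rebalance
                self.back_off = self.back_off_by
                return True
            self.back_off_by *= 2  # balanced: check less and less often
            self.back_off = self.back_off_by
        else:
            self.back_off -= 1
        return False
```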
6 changes: 3 additions & 3 deletions src/initializer/data_provider.hpp
@@ -48,9 +48,9 @@ namespace initializer
using InitFunction = typename InitFunctionHelper<double, dim>::type;


using PHAREDict = cppdict::Dict<int, std::vector<int>, double, std::vector<double>, std::size_t,
std::optional<std::size_t>, std::string, InitFunction<1>,
InitFunction<2>, InitFunction<3>>;
using PHAREDict = cppdict::Dict<bool, int, std::vector<int>, double, std::vector<double>,
std::size_t, std::optional<std::size_t>, std::string,
InitFunction<1>, InitFunction<2>, InitFunction<3>>;


class PHAREDictHandler
1 change: 1 addition & 0 deletions src/initializer/dictator.cpp
@@ -45,6 +45,7 @@ PYBIND11_MODULE(dictator, m)
m.def("add_size_t", add<std::size_t>, "add_size_t");
m.def("add_optional_size_t", add<std::optional<std::size_t>>, "add_optional_size_t");

m.def("add_bool", add<bool>, "add");
m.def("add_int", add<int>, "add");
m.def("add_vector_int", add<std::vector<int>>, "add");
m.def("add_double", add<double>, "add");
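With the new binding, booleans flow into the `PHAREDict` from Python like the other scalar types. A sketch, assuming the pybind11 module is importable as `dictator` (the actual import path depends on how the package is built):

```python
import dictator as pp  # hypothetical import path for the pybind11 module

pp.add_bool("simulation/AMR/loadbalancing/active", True)
pp.add_size_t("simulation/AMR/loadbalancing/every", 1)
```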
