diff --git a/pyphare/pyphare/pharein/__init__.py b/pyphare/pyphare/pharein/__init__.py index a1a392aec..9ff8f7d57 100644 --- a/pyphare/pyphare/pharein/__init__.py +++ b/pyphare/pyphare/pharein/__init__.py @@ -78,9 +78,7 @@ def __init__(self, fn): self.fn = fn def __call__(self, *xyz): - args = [] - for i, arg in enumerate(xyz): - args.append(np.asarray(arg)) + args = [np.asarray(arg) for arg in xyz] ret = self.fn(*args) if isinstance(ret, list): ret = np.asarray(ret) @@ -219,6 +217,8 @@ def as_paths(rb): path = f"simulation/advanced/{k}" if isinstance(v, int): add_int(path, v) + elif isinstance(v, float): + add_double(path, v) else: add_string(path, v) @@ -245,12 +245,20 @@ def as_paths(rb): addInitFunction(partinit_path + "thermal_velocity_x", fn_wrapper(d["vthx"])) addInitFunction(partinit_path + "thermal_velocity_y", fn_wrapper(d["vthy"])) addInitFunction(partinit_path + "thermal_velocity_z", fn_wrapper(d["vthz"])) - add_int(partinit_path + "nbr_part_per_cell", d["nbrParticlesPerCell"]) add_double(partinit_path + "charge", d["charge"]) add_string(partinit_path + "basis", "cartesian") if "init" in d and "seed" in d["init"]: pp.add_optional_size_t(partinit_path + "init/seed", d["init"]["seed"]) + if isinstance(d["nbrParticlesPerCell"], tuple): + addInitFunction( + partinit_path + "nbr_part_per_cell_fn", + fn_wrapper(d["nbrParticlesPerCell"][0]), + ) + add_int(partinit_path + "nbr_part_per_cell", d["nbrParticlesPerCell"][1]) + else: + add_int(partinit_path + "nbr_part_per_cell", d["nbrParticlesPerCell"]) + add_string("simulation/electromag/name", "EM") add_string("simulation/electromag/electric/name", "E") diff --git a/src/amr/load_balancing/concrete_load_balancer_hybrid_strategy_nppc.hpp b/src/amr/load_balancing/concrete_load_balancer_hybrid_strategy_nppc.hpp index ff6ea7865..d97a77190 100644 --- a/src/amr/load_balancing/concrete_load_balancer_hybrid_strategy_nppc.hpp +++ b/src/amr/load_balancing/concrete_load_balancer_hybrid_strategy_nppc.hpp @@ -90,9 +90,9 @@ void ConcreteLoadBalancerHybridStrategyNPPC::compute( // TODO here, we have the lb_view value correctly set on all patches. we also know the id_ // so this is where we should call setWorkloadPatchDataIndex... which is a method of the - // CascadePartitioner lb_view is a local container containing the datz the loadbalancezrmanager - // knows the id, as well as the loadbalancerestimator and the loadbalancerestimator is a - // cascadpartotioner + // CascadePartitioner lb_view is a local container containing the data the LoadBalancerNanager + // knows the id, as well as the LoadBalancerEstimator and the LoadBalancerEstimator is a + // CascadePartitioner } } // namespace PHARE::amr diff --git a/src/amr/load_balancing/load_balancer_manager.hpp b/src/amr/load_balancing/load_balancer_manager.hpp index dd89ff97a..c9a021789 100644 --- a/src/amr/load_balancing/load_balancer_manager.hpp +++ b/src/amr/load_balancing/load_balancer_manager.hpp @@ -12,9 +12,10 @@ #include "load_balancer_estimator.hpp" - namespace PHARE::amr { + + template class LoadBalancerManager { @@ -32,14 +33,14 @@ class LoadBalancerManager ~LoadBalancerManager() { variableDatabase_->removeVariable("LoadBalancerVariable"); }; - int getId() const; + int getId() const { return id_; } void addLoadBalancerEstimator(int const iLevel_min, int const iLevel_max, std::shared_ptr lbe); - void addLoadBalancer(std::unique_ptr loadBalancer) + void setLoadBalancer(std::shared_ptr loadBalancer) { - loadBalancer_ = std::move(loadBalancer); + loadBalancer_ = loadBalancer; loadBalancer_->setWorkloadPatchDataIndex(id_); } @@ -57,27 +58,17 @@ class LoadBalancerManager int const id_; int const maxLevelNumber_; std::vector> loadBalancerEstimators_; - std::unique_ptr loadBalancer_; + std::shared_ptr loadBalancer_; }; template -inline int LoadBalancerManager::getId() const +void LoadBalancerManager::addLoadBalancerEstimator( + int const iLevel_min, int const iLevel_max, std::shared_ptr lbe) { - return id_; -} - - - - -template -inline void -LoadBalancerManager::addLoadBalancerEstimator(int const iLevel_min, int const iLevel_max, - std::shared_ptr lbe) -{ - for (auto ilevel = iLevel_min; ilevel <= iLevel_max; ilevel++) + for (auto ilevel = iLevel_min; ilevel <= iLevel_max; ++ilevel) { loadBalancerEstimators_[ilevel] = lbe; } @@ -87,8 +78,7 @@ LoadBalancerManager::addLoadBalancerEstimator(int const iLevel_min, int con template -inline void LoadBalancerManager::allocate(SAMRAI::hier::Patch& patch, - double const allocateTime) +void LoadBalancerManager::allocate(SAMRAI::hier::Patch& patch, double const allocateTime) { patch.allocatePatchData(id_, allocateTime); } @@ -96,16 +86,15 @@ inline void LoadBalancerManager::allocate(SAMRAI::hier::Patch& patch, template -inline void -LoadBalancerManager::estimate(SAMRAI::hier::PatchLevel& level, - PHARE::solver::IPhysicalModel& model) +void LoadBalancerManager::estimate( + SAMRAI::hier::PatchLevel& level, PHARE::solver::IPhysicalModel& model) { - auto iLevel = level.getLevelNumber(); - auto lbe = loadBalancerEstimators_[iLevel]; - - lbe->estimate(level, model); + if (auto lbe = loadBalancerEstimators_[level.getLevelNumber()]) + lbe->estimate(level, model); } + } // namespace PHARE::amr + #endif diff --git a/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp b/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp index 3c0bca877..52d4b3846 100644 --- a/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp +++ b/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp @@ -213,6 +213,52 @@ namespace amr } + void + swap_L0_particles_if_allowed(std::shared_ptr const& hierarchy, + std::shared_ptr const& oldLevel, + IPhysicalModel& model, double const initDataTime) + { + using ParticleArray_t = typename HybridModel::particle_array_type; + + assert(oldLevel->getLevelNumber() == 0); + auto const level = *hierarchy->getPatchLevel(0); + + auto& hybridModel = static_cast(model); + auto& ions = hybridModel.state.ions; + + bool single_patch_domains + = level.getNumberOfPatches() == 1 and oldLevel->getNumberOfPatches() == 1; + + PHARE_LOG_LINE_STR(single_patch_domains); + + if (single_patch_domains) + { + std::vector old_domain, old_patch_ghost; + { + auto dataOnPatch = resourcesManager_->setOnPatch(**oldLevel->begin(), ions); + for (auto& pop : ions) + { + old_domain.push_back(&pop.domainParticles()); + old_patch_ghost.push_back(&pop.patchGhostParticles()); + } + } + auto dataOnPatch = resourcesManager_->setOnPatch(**level.begin(), ions); + std::size_t idx = 0; + + PHARE_LOG_LINE_STR(old_domain.size()); + + for (auto& pop : ions) + { + PHARE_LOG_LINE_STR(pop.domainParticles().size()); + PHARE_LOG_LINE_STR(old_domain[idx]->size()); + pop.domainParticles() = std::move(*old_domain[idx]); + pop.patchGhostParticles() = std::move(*old_patch_ghost[idx]); + ++idx; + } + } + else + patchGhostPartRefiners_.fill(0, initDataTime); + }; /** * @brief regrid performs the regriding communications for Hybrid to Hybrid messengers @@ -228,12 +274,19 @@ namespace amr bool isRegriddingL0 = levelNumber == 0 and oldLevel; + // if (isRegriddingL0) + // swap_L0_particles_if_allowed(hierarchy, oldLevel, model, initDataTime); + // else + { + } + magneticInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); electricInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); domainParticlesRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); patchGhostPartRefiners_.fill(levelNumber, initDataTime); + // regriding will fill the new level wherever it has points that overlap // old level. This will include its level border points. // These new level border points will thus take values that where previous diff --git a/src/amr/messengers/refiner.hpp b/src/amr/messengers/refiner.hpp index f18e77355..c7edd7487 100644 --- a/src/amr/messengers/refiner.hpp +++ b/src/amr/messengers/refiner.hpp @@ -164,8 +164,6 @@ class Refiner : private Communicator } else { - PHARE_LOG_LINE_STR(levelNumber << " " << hierarchy->getFinestLevelNumber() << " " - << oldLevel); algo->createSchedule(level, oldLevel, level->getNextCoarserHierarchyLevelNumber(), hierarchy) ->fillData(initDataTime); diff --git a/src/amr/multiphysics_integrator.hpp b/src/amr/multiphysics_integrator.hpp index 16775c241..27891418e 100644 --- a/src/amr/multiphysics_integrator.hpp +++ b/src/amr/multiphysics_integrator.hpp @@ -314,7 +314,7 @@ namespace solver bool const isRegridding = oldLevel != nullptr; PHARE_LOG_LINE_STR("init level " << levelNumber // - << " with regriding = " << isRegridding + << " with regridding = " << isRegridding << " with allocateData = " << allocateData << " with canBeRefined = " << canBeRefined << " with initialTime = " << initialTime); @@ -369,6 +369,8 @@ namespace solver solver.onRegrid(); } + else + load_balancer_manager_->estimate(*hierarchy->getPatchLevel(levelNumber), model); } diff --git a/src/amr/wrappers/integrator.hpp b/src/amr/wrappers/integrator.hpp index 436f66c58..298fd7537 100644 --- a/src/amr/wrappers/integrator.hpp +++ b/src/amr/wrappers/integrator.hpp @@ -48,11 +48,11 @@ class Integrator void initialize() { timeRefIntegrator_->initializeHierarchy(); } - Integrator(PHARE::initializer::PHAREDict const& dict, std::shared_ptr hierarchy, std::shared_ptr timeRefLevelStrategy, std::shared_ptr tagAndInitStrategy, + std::shared_ptr loadBalancer, // double startTime, double endTime); private: @@ -80,14 +80,15 @@ Integrator<_dimension>::Integrator( PHARE::initializer::PHAREDict const& dict, std::shared_ptr hierarchy, std::shared_ptr timeRefLevelStrategy, - std::shared_ptr tagAndInitStrategy, double startTime, - double endTime) + std::shared_ptr tagAndInitStrategy, + std::shared_ptr loadBalancer, // + double startTime, double endTime) : rebalance_coarsest{check_rebalance_coarsest(dict)} { - auto loadBalancer_db = std::make_shared("LoadBalancerDB"); - loadBalancer_db->putDouble("flexible_load_tolerance", .75); - auto loadBalancer = std::make_shared( - SAMRAI::tbox::Dimension{dimension}, "LoadBalancer"); + // auto loadBalancer_db = std::make_shared("LoadBalancerDB"); + // loadBalancer_db->putDouble("flexible_load_tolerance", .75); + // auto loadBalancer = std::make_shared( + // SAMRAI::tbox::Dimension{dimension}, "LoadBalancer"); loadBalancer->setSAMRAI_MPI( SAMRAI::tbox::SAMRAI_MPI::getSAMRAIWorld()); // TODO Is it really needed ? @@ -96,7 +97,6 @@ Integrator<_dimension>::Integrator( auto standardTag = std::make_shared( "StandardTagAndInitialize", tagAndInitStrategy.get(), refineDB); - auto clustering = [&]() -> std::shared_ptr { if (!dict["simulation"]["AMR"].contains("clustering")) throw std::runtime_error(std::string{"clustering type not specificed"}); diff --git a/src/core/data/ions/particle_initializers/maxwellian_particle_initializer.hpp b/src/core/data/ions/particle_initializers/maxwellian_particle_initializer.hpp index caf21df97..6977bd67c 100644 --- a/src/core/data/ions/particle_initializers/maxwellian_particle_initializer.hpp +++ b/src/core/data/ions/particle_initializers/maxwellian_particle_initializer.hpp @@ -43,11 +43,13 @@ class MaxwellianParticleInitializer : public ParticleInitializer const& thermalVelocity, double const particleCharge, std::uint32_t const& nbrParticlesPerCell, std::optional seed = {}, Basis const basis = Basis::Cartesian, - std::array const magneticField = {nullptr, nullptr, nullptr}) + std::array const magneticField = {nullptr, nullptr, nullptr}, + InputFunction const ppc_by_icell = nullptr /*used if set */) : density_{density} , bulkVelocity_{bulkVelocity} , thermalVelocity_{thermalVelocity} , magneticField_{magneticField} + , ppc_by_icell_{ppc_by_icell} , particleCharge_{particleCharge} , nbrParticlePerCell_{nbrParticlesPerCell} , basis_{basis} @@ -84,6 +86,7 @@ class MaxwellianParticleInitializer : public ParticleInitializer bulkVelocity_; std::array thermalVelocity_; std::array magneticField_; + InputFunction ppc_by_icell_; double particleCharge_; std::uint32_t nbrParticlePerCell_; @@ -96,10 +99,12 @@ class MaxwellianInitFunctions { public: template - MaxwellianInitFunctions(Function& density, FunctionArray& bulkVelocity, + MaxwellianInitFunctions(Function& density, Function& ppc, FunctionArray& bulkVelocity, FunctionArray& thermalVelocity, FunctionArray& magneticField, Basis const& basis, Coords const&... coords) : _n{density(coords...)} + , _ppc{ppc ? ppc(coords...) : nullptr} + { static_assert(sizeof...(coords) <= 3, "can only provide up to 3 coordinates"); for (std::uint32_t i = 0; i < 3; i++) @@ -114,7 +119,10 @@ class MaxwellianInitFunctions NO_DISCARD std::array B() const { return ptrs(_B); } - NO_DISCARD auto operator()() const { return std::make_tuple(_n->data(), ptrs(_V), ptrs(_Vth)); } + NO_DISCARD auto operator()() const + { + return std::make_tuple(_n->data(), _ppc ? _ppc->data() : nullptr, ptrs(_V), ptrs(_Vth)); + } private: NO_DISCARD std::array @@ -123,7 +131,7 @@ class MaxwellianInitFunctions return {v[0]->data(), v[1]->data(), v[2]->data()}; } - std::shared_ptr> const _n; + std::shared_ptr> const _n, _ppc; std::array>, 3> _B, _V, _Vth; }; @@ -166,19 +174,28 @@ void MaxwellianParticleInitializer::loadParticles( return gridLayout.cellCenteredCoordinates(indexes...); }); - auto const fns = std::make_from_tuple(std::tuple_cat( - std::forward_as_tuple(density_, bulkVelocity_, thermalVelocity_, magneticField_, basis_), - cellCoords)); + auto const fns = std::make_from_tuple( + std::tuple_cat(std::forward_as_tuple(density_, ppc_by_icell_, bulkVelocity_, + thermalVelocity_, magneticField_, basis_), + cellCoords)); - auto const [n, V, Vth] = fns(); - auto randGen = getRNG(rngSeed_); + auto const [n, ppc, V, Vth] = fns(); + auto randGen = getRNG(rngSeed_); ParticleDeltaDistribution deltaDistrib; - for (std::size_t flatCellIdx = 0; flatCellIdx < ndCellIndices.size(); flatCellIdx++) + auto const& ppc_ = ppc; // reference to local binding thing + auto ppc_for_icell = [&](auto const& ficell) { + if (ppc_by_icell_) + return static_cast(ppc_[ficell]); + return nbrParticlePerCell_; + }; + + for (std::size_t flatCellIdx = 0; flatCellIdx < ndCellIndices.size(); ++flatCellIdx) { + auto const ppc_per_cell = ppc_for_icell(flatCellIdx); auto const cellWeight = n[flatCellIdx] / nbrParticlePerCell_; auto const AMRCellIndex = layout.localToAMR(point(flatCellIdx, ndCellIndices)); - + auto const iCell = AMRCellIndex.template toArray(); std::array particleVelocity; std::array, 3> basis; @@ -188,7 +205,7 @@ void MaxwellianParticleInitializer::loadParticles( localMagneticBasis({B[0][flatCellIdx], B[1][flatCellIdx], B[2][flatCellIdx]}, basis); } - for (std::uint32_t ipart = 0; ipart < nbrParticlePerCell_; ++ipart) + for (std::uint32_t ipart = 0; ipart < ppc_per_cell; ++ipart) { maxwellianVelocity({V[0][flatCellIdx], V[1][flatCellIdx], V[2][flatCellIdx]}, {Vth[0][flatCellIdx], Vth[1][flatCellIdx], Vth[2][flatCellIdx]}, // @@ -197,8 +214,7 @@ void MaxwellianParticleInitializer::loadParticles( if (basis_ == Basis::Magnetic) particleVelocity = basisTransform(basis, particleVelocity); - particles.emplace_back(Particle{cellWeight, particleCharge_, - AMRCellIndex.template toArray(), + particles.emplace_back(Particle{cellWeight, particleCharge_, iCell, deltas(deltaDistrib, randGen), particleVelocity}); } } diff --git a/src/core/data/ions/particle_initializers/particle_initializer_factory.hpp b/src/core/data/ions/particle_initializers/particle_initializer_factory.hpp index 2f4cfa3f2..eb1359177 100644 --- a/src/core/data/ions/particle_initializers/particle_initializer_factory.hpp +++ b/src/core/data/ions/particle_initializers/particle_initializer_factory.hpp @@ -47,7 +47,13 @@ namespace core auto charge = dict["charge"].template to(); - auto nbrPartPerCell = dict["nbr_part_per_cell"].template to(); + auto nbrPartPerCell = initializer::dict_get(dict, "nbr_part_per_cell", int{0}); + FunctionType nbrPartPerCellFn + = initializer::dict_get(dict, "nbr_part_per_cell_fn", FunctionType{nullptr}); + if (not nbrPartPerCellFn and nbrPartPerCell == 0) + { + throw std::runtime_error("PPC cannot be 0"); + } auto basisName = dict["basis"].template to(); @@ -59,22 +65,25 @@ namespace core if (dict.contains("init") && dict["init"].contains("seed")) seed = dict["init"]["seed"].template to>(); + std::array magneticField = {nullptr, nullptr, nullptr}; + if (basisName == "cartesian") { return std::make_unique< MaxwellianParticleInitializer>( - density, v, vth, charge, nbrPartPerCell, seed); + density, v, vth, charge, nbrPartPerCell, seed, Basis::Cartesian, + magneticField, nbrPartPerCellFn); } else if (basisName == "magnetic") { - [[maybe_unused]] Basis basis = Basis::Magnetic; - [[maybe_unused]] auto& bx = dict["magnetic_x"].template to(); - [[maybe_unused]] auto& by = dict["magnetic_x"].template to(); - [[maybe_unused]] auto& bz = dict["magnetic_x"].template to(); + magneticField[0] = dict["magnetic_x"].template to(); + magneticField[1] = dict["magnetic_x"].template to(); + magneticField[2] = dict["magnetic_x"].template to(); return std::make_unique< MaxwellianParticleInitializer>( - density, v, vth, charge, nbrPartPerCell, seed); + density, v, vth, charge, nbrPartPerCell, seed, Basis::Magnetic, + magneticField, nbrPartPerCellFn); } } // TODO throw? diff --git a/src/simulator/simulator.hpp b/src/simulator/simulator.hpp index 012342905..0ec21cdde 100644 --- a/src/simulator/simulator.hpp +++ b/src/simulator/simulator.hpp @@ -262,26 +262,39 @@ void Simulator::hybrid_init(initializer::PHAREDict multiphysInteg_->registerTagger(0, maxLevelNumber_ - 1, std::move(hybridTagger_)); - - auto lbm_ = std::make_unique>(dict); - auto lbe_ = std::make_shared>( dict["simulation"]["AMR"]["loadbalancing"].template to(), lbm_->getId()); - lbm_->addLoadBalancerEstimator(0, maxLevelNumber_ - 1, std::move(lbe_)); - lbm_->addLoadBalancer(std::make_unique( - SAMRAI::tbox::Dimension{dim}, "cascade")); - multiphysInteg_->setLoadBalancerManager(std::move(lbm_)); - - + auto loadBalancer_db = std::make_shared("LoadBalancerDB"); + double flexible_load_tolerance + = initializer::dict_get(dict, "simulation/advanced/integrator/flexible_load_tolerance", .5); + + loadBalancer_db->putDouble("flexible_load_tolerance", flexible_load_tolerance); + auto loadBalancer = std::make_shared( + SAMRAI::tbox::Dimension{dimension}, "LoadBalancer", loadBalancer_db); + + PHARE_LOG_LINE_STR(dict["simulation"]["AMR"]["refinement"].contains("tagging")); + if (dict["simulation"]["AMR"]["refinement"].contains("tagging")) + { // Load balancers break with refinement boxes - only tagging supported + /* + P=0000000:Program abort called in file ``/.../SAMRAI/xfer/RefineSchedule.cpp'' at line 369 + P=0000000:ERROR MESSAGE: + P=0000000:RefineSchedule:RefineSchedule error: We are not currently + P=0000000:supporting RefineSchedules with the source level finer + P=0000000:than the destination level + */ + lbm_->addLoadBalancerEstimator(0, maxLevelNumber_ - 1, std::move(lbe_)); + lbm_->setLoadBalancer(loadBalancer); + } + multiphysInteg_->setLoadBalancerManager(std::move(lbm_)); if (dict["simulation"].contains("restarts")) startTime_ = restarts_init(dict["simulation"]["restarts"]); integrator_ = std::make_unique(dict, hierarchy_, multiphysInteg_, multiphysInteg_, - startTime_, finalTime_); + loadBalancer, startTime_, finalTime_); timeStamper = core::TimeStamperFactory::create(dict["simulation"]); diff --git a/tests/simulator/test_load_balancing.py b/tests/simulator/test_load_balancing.py index 91cab4b5c..5c8e8d1a8 100644 --- a/tests/simulator/test_load_balancing.py +++ b/tests/simulator/test_load_balancing.py @@ -21,14 +21,15 @@ cpp = cpp_lib() startMPI() -time_step_nbr = 1 +time_step_nbr = 2 time_step = 0.005 smallest_patch_size = 10 largest_patch_size = 20 rebalance_coarsest = False cells = (100, 100) +dl = (0.2, 0.2) diag_outputs = "phare_outputs/harris/2d/load_balancing" -timestamps = [0, 0.005] +timestamps = [0, 0.005, 0.01] def config(): @@ -38,7 +39,7 @@ def config(): time_step_nbr=time_step_nbr, time_step=time_step, cells=cells, - dl=(0.2, 0.2), + dl=dl, refinement_boxes={}, hyper_resistivity=0.001, resistivity=0.001, @@ -46,9 +47,23 @@ def config(): "format": "phareh5", "options": {"dir": diag_outputs, "mode": "overwrite"}, }, - advanced={"integrator/rebalance_coarsest": rebalance_coarsest}, + advanced={ + "integrator/rebalance_coarsest": rebalance_coarsest, + # "integrator/flexible_load_tolerance": 0.001, + }, ) + + def ppc_by_icell(x, y): + # print("ppc_by_icell(icell)", x) + # print("ppc_by_icell(icell)", x.shape, y.shape) + ppc = y.copy() + ppc[:] = 95 + # print("ppc", ppc) + ppc[np.where(np.isclose(y, 10, atol=0.1))] = 345 + # print("ppc", ppc) + return ppc + def density(x, y): L = sim.simulation_domain()[1] return ( @@ -130,7 +145,7 @@ def vthz(x, y): "vthx": vthx, "vthy": vthy, "vthz": vthz, - "nbr_part_per_cell": 100, + "nbr_part_per_cell": (ppc_by_icell, 100), } ph.MaxwellianFluidModel( @@ -148,7 +163,7 @@ def vthz(x, y): return sim -def get_time(path, time, datahier=None): +def get_time(path, time=0, datahier=None): time = "{:.10f}".format(time) from pyphare.pharesee.hierarchy import hierarchy_from @@ -207,17 +222,43 @@ def test_balance(sim, time): make_fig(hier, f"lb_t{time}", 0, ppc_collections) +def print_time(sim, time=0): + hier = get_time(diag_outputs, time) + + per_rank = {f"p{rank}": 0 for rank in range(10)} + + def _parse_rank(patch_id): + return patch_id.split("#")[0] + + c = 0 + for ilvl, lvl in hier.levels().items(): + for patch in lvl: + for pd_key, pd in patch.patch_datas.items(): + print("patch particles", patch.box, pd.size()) + c += pd.size() + + per_rank[_parse_rank(patch.id)] += pd.size() + + print("total", c) + print("per_rank", per_rank) + + def test_particles_have_evolved(sim): - init = get_merged_particles(sim) - end = get_merged_particles(sim, time_step) + print_time(sim, time=0) + print_time(sim, time=time_step) + print_time(sim, time=time_step * 2) - assert init != end + # init = get_merged_particles(sim) + # end = get_merged_particles(sim, time_step) + # assert init != end def main(): sim = config() Simulator(sim).run() + if cpp.mpi_rank() > 0: + return test_particles_have_evolved(sim) # test_balance(sim, timestamps[cpp.mpi_rank() + 1])