From 766a521bea1a840b9ff25ef328dd971dfaa0f9f5 Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Tue, 5 Mar 2024 16:24:23 -0800 Subject: [PATCH 01/10] Enable `WarpX::ComputeEdgeLengths` in RZ (#4749) * Enable `WarpX::ComputeEdgeLengths` in RZ * Update exit message when using EBs in RZ with fully electromagnetic solvers. --- Source/EmbeddedBoundary/WarpXInitEB.cpp | 12 ++++-------- Source/WarpX.cpp | 5 +++-- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/Source/EmbeddedBoundary/WarpXInitEB.cpp b/Source/EmbeddedBoundary/WarpXInitEB.cpp index 990ff1445ed..c3036b59aa4 100644 --- a/Source/EmbeddedBoundary/WarpXInitEB.cpp +++ b/Source/EmbeddedBoundary/WarpXInitEB.cpp @@ -114,16 +114,15 @@ WarpX::InitEB () void WarpX::ComputeEdgeLengths (std::array< std::unique_ptr, 3 >& edge_lengths, const amrex::EBFArrayBoxFactory& eb_fact) { -#ifndef WARPX_DIM_RZ BL_PROFILE("ComputeEdgeLengths"); auto const &flags = eb_fact.getMultiEBCellFlagFab(); auto const &edge_centroid = eb_fact.getEdgeCent(); -#ifdef WARPX_DIM_XZ +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) edge_lengths[1]->setVal(0.); #endif for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi){ -#ifdef WARPX_DIM_XZ +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) for (int idim = 0; idim < 3; ++idim){ if(idim == 1) continue; #elif defined(WARPX_DIM_3D) @@ -148,7 +147,7 @@ WarpX::ComputeEdgeLengths (std::array< std::unique_ptr, 3 >& ed edge_lengths_dim(i, j, k) = 0.; }); } else { -#ifdef WARPX_DIM_XZ +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) int idim_amrex = idim; if(idim == 2) idim_amrex = 1; auto const &edge_cent = edge_centroid[idim_amrex]->const_array(mfi); @@ -175,7 +174,6 @@ WarpX::ComputeEdgeLengths (std::array< std::unique_ptr, 3 >& ed } } } -#endif } @@ -246,11 +244,10 @@ WarpX::ComputeFaceAreas (std::array< std::unique_ptr, 3 >& face void WarpX::ScaleEdges (std::array< std::unique_ptr, 3 >& edge_lengths, const std::array& cell_size) { -#ifndef WARPX_DIM_RZ BL_PROFILE("ScaleEdges"); for (amrex::MFIter mfi(*edge_lengths[0]); mfi.isValid(); ++mfi) { -#ifdef WARPX_DIM_XZ +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) for (int idim = 0; idim < 3; ++idim){ if(idim == 1) continue; #elif defined(WARPX_DIM_3D) @@ -267,7 +264,6 @@ WarpX::ScaleEdges (std::array< std::unique_ptr, 3 >& edge_lengt }); } } -#endif } void diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 195a6e5fe10..d453fa6f941 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -752,8 +752,9 @@ WarpX::ReadParameters () #if defined(AMREX_USE_EB) && defined(WARPX_DIM_RZ) WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - electromagnetic_solver_id==ElectromagneticSolverAlgo::None, - "Currently, the embedded boundary in RZ only works for electrostatic solvers (or no solver)."); + electromagnetic_solver_id==ElectromagneticSolverAlgo::None + || electromagnetic_solver_id==ElectromagneticSolverAlgo::HybridPIC, + "Currently, the embedded boundary in RZ only works for electrostatic solvers, the Ohm's law solver or with no solver installed."); #endif if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame || From 3982b0005e82c030b2476dc5efd0ff65a18f6fa1 Mon Sep 17 00:00:00 2001 From: Eya D <81635404+EyaDammak@users.noreply.github.com> Date: Wed, 6 Mar 2024 16:25:25 -0800 Subject: [PATCH 02/10] New Python test: Particle-Boundary interaction (#4729) * enable the diagnostic of ParticleScraping in Python * Update picmi.py * Update picmi.py * new test * python update * modification of the script * 
[pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update PICMI_inputs_rz.py * json * update * Update PICMI_inputs_rz.py * Update particle_boundary_interaction.json * Update PICMI_inputs_rz.py * Update PICMI_inputs_rz.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update PICMI_inputs_rz.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix hanging script in parallel * Make the test executable * Update analysis script * Update particle_containers.py * Update PICMI_inputs_rz.py * Update analysis.py * Update analysis.py * Update particle_containers.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Remi Lehe Co-authored-by: Axel Huebl --- .../PICMI_inputs_rz.py | 169 ++++++++++++++++++ .../particle_boundary_interaction/analysis.py | 51 ++++++ Python/pywarpx/particle_containers.py | 3 + .../particle_boundary_interaction.json | 18 ++ Regression/WarpX-tests.ini | 21 +++ 5 files changed, 262 insertions(+) create mode 100644 Examples/Tests/particle_boundary_interaction/PICMI_inputs_rz.py create mode 100755 Examples/Tests/particle_boundary_interaction/analysis.py create mode 100644 Regression/Checksum/benchmarks_json/particle_boundary_interaction.json diff --git a/Examples/Tests/particle_boundary_interaction/PICMI_inputs_rz.py b/Examples/Tests/particle_boundary_interaction/PICMI_inputs_rz.py new file mode 100644 index 00000000000..4017a05d413 --- /dev/null +++ b/Examples/Tests/particle_boundary_interaction/PICMI_inputs_rz.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python3 +# @Eya Dammak supervised by @Remi Lehe, 2024 +# --- Input file for particle-boundary interaction testing in RZ. +# --- This input is a simple case of reflection +# --- of one electron on the surface of a sphere. 
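+# --- The mirror_reflection() callback below reads the scraped electron's
+# --- position, momentum and local surface normal back from the particle
+# --- boundary buffer and re-injects it with the specular ("mirror")
+# --- momentum u' = u - 2(u.n)n.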
+import numpy as np + +from pywarpx import callbacks, particle_containers, picmi + +########################## +# numerics parameters +########################## + +dt = 1.0e-11 + +# --- Nb time steps + +max_steps = 23 +diagnostic_interval = 1 + +# --- grid + +nr = 64 +nz= 64 + +rmin = 0.0 +rmax = 2 +zmin = -2 +zmax = 2 + +########################## +# numerics components +########################## + +grid = picmi.CylindricalGrid( + number_of_cells = [nr, nz], + n_azimuthal_modes = 1, + lower_bound = [rmin, zmin], + upper_bound = [rmax, zmax], + lower_boundary_conditions = ['none', 'dirichlet'], + upper_boundary_conditions = ['dirichlet', 'dirichlet'], + lower_boundary_conditions_particles = ['absorbing', 'reflecting'], + upper_boundary_conditions_particles = ['absorbing', 'reflecting'] +) + + +solver = picmi.ElectrostaticSolver( + grid=grid, method='Multigrid', + warpx_absolute_tolerance=1e-7 +) + +embedded_boundary = picmi.EmbeddedBoundary( + implicit_function="-(x**2+y**2+z**2-radius**2)", + radius = 0.2 +) + +########################## +# physics components +########################## + +#one particle +e_dist=picmi.ParticleListDistribution(x=0.0, y=0.0, z=-0.25, ux=0.5e10, uy=0.0, uz=1.0e10, weight=1) + +electrons = picmi.Species( + particle_type='electron', name='electrons', initial_distribution=e_dist, warpx_save_particles_at_eb=1 +) + +########################## +# diagnostics +########################## + +field_diag = picmi.FieldDiagnostic( + name = 'diag1', + grid = grid, + period = diagnostic_interval, + data_list = ['Er', 'Ez', 'phi', 'rho','rho_electrons'], + warpx_format = 'openpmd', + write_dir = '.', + warpx_file_prefix = 'particle_boundary_interaction_plt' +) + +part_diag = picmi.ParticleDiagnostic(name = 'diag1', + period = diagnostic_interval, + species = [electrons], + warpx_format = 'openpmd', + write_dir = '.', + warpx_file_prefix = 'particle_boundary_interaction_plt' +) + +########################## +# simulation setup +########################## + +sim = picmi.Simulation( + solver=solver, + time_step_size = dt, + max_steps = max_steps, + warpx_embedded_boundary=embedded_boundary, + warpx_amrex_the_arena_is_managed=1, +) + +sim.add_species( + electrons, + layout = picmi.GriddedLayout( + n_macroparticle_per_cell=[10, 1, 1], grid=grid + ) +) +sim.add_diagnostic(part_diag) +sim.add_diagnostic(field_diag) + +sim.initialize_inputs() +sim.initialize_warpx() + +########################## +# python particle data access +########################## + +def concat( list_of_arrays ): + if len(list_of_arrays) == 0: + # Return a 1d array of size 0 + return np.empty(0) + else: + return np.concatenate( list_of_arrays ) + + +def mirror_reflection(): + buffer = particle_containers.ParticleBoundaryBufferWrapper() #boundary buffer + + #STEP 1: extract the different parameters of the boundary buffer (normal, time, position) + lev = 0 # level 0 (no mesh refinement here) + delta_t = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'deltaTimeScraped', lev)) + r = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'x', lev)) + theta = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'theta', lev)) + z = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'z', lev)) + x= r*np.cos(theta) #from RZ coordinates to 3D coordinates + y= r*np.sin(theta) + ux = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'ux', lev)) + uy = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'uy', lev)) + uz = 
concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'uz', lev)) + w = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'w', lev)) + nx = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'nx', lev)) + ny = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'ny', lev)) + nz = concat(buffer.get_particle_boundary_buffer("electrons", 'eb', 'nz', lev)) + + #STEP 2: use these parameters to inject particle from the same position in the plasma + elect_pc = particle_containers.ParticleContainerWrapper('electrons') #general particle container + + ####this part is specific to the case of simple reflection. + un=ux*nx+uy*ny+uz*nz + ux_reflect=-2*un*nx+ux #for a "mirror reflection" u(sym)=-2(u.n)n+u + uy_reflect=-2*un*ny+uy + uz_reflect=-2*un*nz+uz + elect_pc.add_particles( + x=x + (dt-delta_t)*ux_reflect, y=y + (dt-delta_t)*uy_reflect, z=z + (dt-delta_t)*uz_reflect, + ux=ux_reflect, uy=uy_reflect, uz=uz_reflect, + w=w + ) #adds the particle in the general particle container at the next step + #### Can be modified depending on the model of interaction. + + buffer.clear_buffer() #reinitialise the boundary buffer + +callbacks.installafterstep(mirror_reflection) #mirror_reflection is called at the next step + # using the new particle container modified at the last step + +########################## +# simulation run +########################## + +sim.step(max_steps) #the whole process is done "max_steps" times diff --git a/Examples/Tests/particle_boundary_interaction/analysis.py b/Examples/Tests/particle_boundary_interaction/analysis.py new file mode 100755 index 00000000000..9768751ed74 --- /dev/null +++ b/Examples/Tests/particle_boundary_interaction/analysis.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python +# @Eya Dammak supervised by @Remi Lehe, 2024 +""" +This script tests the last coordinate after adding an electron. +The sphere is centered on O and has a radius of 0.2 (EB) +The electron is initially at: (0,0,-0.25) and moves with a velocity: +(0.5e10,0,1.0e10) with a time step of 1e-11. +An input file PICMI_inputs_rz.py is used. 
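+The reference coordinates below follow from ballistic motion of the
+electron up to the sphere of radius 0.2, one specular reflection
+u' = u - 2(u.n)n at the point of contact, and free flight afterwards,
+evaluated at the last diagnostic iteration.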
+""" +import os +import sys + +import numpy as np +from openpmd_viewer import OpenPMDTimeSeries +import yt + +yt.funcs.mylog.setLevel(0) +sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +import checksumAPI + +# Open plotfile specified in command line +filename = sys.argv[1] +test_name = os.path.split(os.getcwd())[1] +checksumAPI.evaluate_checksum(test_name, filename, output_format='openpmd') + +ts = OpenPMDTimeSeries('./particle_boundary_interaction_plt') + +it=ts.iterations +x,y,z=ts.get_particle(['x','y','z'], species='electrons', iteration=it[-1]) + +# Analytical results calculated +x_analytic=0.03532 +y_analytic=0.00000 +z_analytic=-0.20531 + +print('NUMERICAL coordinates of the point of contact:') +print('x=%5.5f, y=%5.5f, z=%5.5f' % (x[0], y[0], z[0])) +print('\n') +print('ANALYTICAL coordinates of the point of contact:') +print('x=%5.5f, y=%5.5f, z=%5.5f' % (x_analytic, y_analytic, z_analytic)) + +tolerance=1e-5 + +diff_x=np.abs((x[0]-x_analytic)/x_analytic) +diff_z=np.abs((z[0]-z_analytic)/z_analytic) + +print('\n') +print("percentage error for x = %5.4f %%" %(diff_x *100)) +print("percentage error for z = %5.4f %%" %(diff_z *100)) + +assert (diff_x < tolerance) and (y[0] < 1e-8) and (diff_z < tolerance), 'Test particle_boundary_interaction did not pass' diff --git a/Python/pywarpx/particle_containers.py b/Python/pywarpx/particle_containers.py index 19b0a1ab6c4..f8e687f99bd 100644 --- a/Python/pywarpx/particle_containers.py +++ b/Python/pywarpx/particle_containers.py @@ -761,6 +761,9 @@ def get_particle_boundary_buffer(self, species_name, boundary, comp_name, level) The data for the arrays are not copied, but share the underlying memory buffer with WarpX. The arrays are fully writeable. + You can find `here https://github.com/ECP-WarpX/WarpX/blob/319e55b10ad4f7c71b84a4fb21afbafe1f5b65c2/Examples/Tests/particle_boundary_interaction/PICMI_inputs_rz.py` + an example of a simple case of particle-boundary interaction (reflection). + Parameters ---------- diff --git a/Regression/Checksum/benchmarks_json/particle_boundary_interaction.json b/Regression/Checksum/benchmarks_json/particle_boundary_interaction.json new file mode 100644 index 00000000000..e35516758f2 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/particle_boundary_interaction.json @@ -0,0 +1,18 @@ +{ + "electrons": { + "particle_momentum_x": 7.082238783412152e-21, + "particle_momentum_y": 0.0, + "particle_momentum_z": 7.319015172219235e-21, + "particle_position_x": 0.03532003602713794, + "particle_position_y": 0.0, + "particle_position_z": 0.20531131195378569, + "particle_weight": 1.0 + }, + "lev=0": { + "Er": 1.3287679891086577e-06, + "Ez": 1.791737740110229e-06, + "phi": 3.3983929077087634e-07, + "rho": 7.811529217958718e-16, + "rho_electrons": 7.811529217958718e-16 + } +} diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 426ecb0717c..ab422d5631a 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -4681,3 +4681,24 @@ doVis = 0 compareParticles = 0 doComparison = 0 analysisRoutine = Examples/Tests/gaussian_beam/analysis_focusing_beam.py + +[particle_boundary_interaction] +buildDir = . 
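+# Note: RZ regression builds are registered with dim = 2; the PICMI script is
+# the input file and is executed through customRunCmd below.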
+inputFile = Examples/Tests/particle_boundary_interaction/PICMI_inputs_rz.py +runtime_params = +customRunCmd = python3 PICMI_inputs_rz.py +dim = 2 +addToCompileString = USE_PYTHON_MAIN=TRUE USE_RZ=TRUE +cmakeSetupOpts = -DWarpX_DIMS="RZ" -DWarpX_EB=ON -DWarpX_PYTHON=ON +target = pip_install +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electrons +outputFile = particle_boundary_interaction_plt +analysisRoutine = Examples/Tests/particle_boundary_interaction/analysis.py From 642aa5480ec6afc036e1e5133a8cb65e7739feca Mon Sep 17 00:00:00 2001 From: Eya D <81635404+EyaDammak@users.noreply.github.com> Date: Wed, 6 Mar 2024 16:32:11 -0800 Subject: [PATCH 03/10] Adding normal components to regular boundary buffer (#4742) * first draft * adding normal only * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update ParticleBoundaryBuffer.cpp * Update ParticleBoundaryBuffer.cpp --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- Source/Particles/ParticleBoundaryBuffer.cpp | 23 +++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/Source/Particles/ParticleBoundaryBuffer.cpp b/Source/Particles/ParticleBoundaryBuffer.cpp index f991f211d28..d1f9f814bc5 100644 --- a/Source/Particles/ParticleBoundaryBuffer.cpp +++ b/Source/Particles/ParticleBoundaryBuffer.cpp @@ -1,5 +1,5 @@ /* Copyright 2021 Andrew Myers - * + * modified by Remi Lehe, Eya Dammak 2023 * This file is part of WarpX. * * License: BSD-3-Clause-LBNL @@ -22,6 +22,7 @@ #include #include #include +#include using namespace amrex::literals; struct IsOutsideDomainBoundary { @@ -164,8 +165,12 @@ struct FindEmbeddedBoundaryIntersection { struct CopyAndTimestamp { int m_step_index; int m_delta_index; + int m_normal_index; int m_step; const amrex::Real m_dt; + int m_idim; + int m_iside; + template AMREX_GPU_HOST_DEVICE @@ -182,8 +187,16 @@ struct CopyAndTimestamp { for (int j = 0; j < src.m_num_runtime_int; ++j) { dst.m_runtime_idata[j][dst_i] = src.m_runtime_idata[j][src_i]; } + dst.m_runtime_idata[m_step_index][dst_i] = m_step; dst.m_runtime_rdata[m_delta_index][dst_i] = 0._rt; //delta_fraction is initialized to zero + + //calculation of the normal to the boundary + std::array n = {0.0, 0.0, 0.0}; + n[m_idim]=1-2*m_iside; + dst.m_runtime_rdata[m_normal_index][dst_i]= n[0]; + dst.m_runtime_rdata[m_normal_index+1][dst_i]= n[1]; + dst.m_runtime_rdata[m_normal_index+2][dst_i]= n[2]; } }; @@ -351,7 +364,6 @@ void ParticleBoundaryBuffer::gatherParticles (MultiParticleContainer& mypc, const amrex::Geometry& geom = warpx_instance.Geom(0); auto plo = geom.ProbLoArray(); auto phi = geom.ProbHiArray(); - for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { if (geom.isPeriodic(idim)) { continue; } @@ -367,6 +379,9 @@ void ParticleBoundaryBuffer::gatherParticles (MultiParticleContainer& mypc, buffer[i] = pc.make_alike(); buffer[i].AddIntComp("stepScraped", false); buffer[i].AddRealComp("deltaTimeScraped", false); + buffer[i].AddRealComp("nx", false); + buffer[i].AddRealComp("ny", false); + buffer[i].AddRealComp("nz", false); } auto& species_buffer = buffer[i]; for (int lev = 0; lev < pc.numLevels(); ++lev) @@ -412,11 +427,11 @@ void ParticleBoundaryBuffer::gatherParticles (MultiParticleContainer& mypc, const int step_scraped_index = string_to_index_intcomp.at("stepScraped"); auto string_to_index_realcomp = buffer[i].getParticleRuntimeComps(); 
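+                // "nx", "ny" and "nz" are added consecutively via AddRealComp above,
+                // so normal_index, normal_index+1 and normal_index+2 address the three
+                // components of the boundary normal filled in by CopyAndTimestamp.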
const int delta_index = string_to_index_realcomp.at("deltaTimeScraped"); + const int normal_index = string_to_index_realcomp.at("nx"); const int step = warpx_instance.getistep(0); - amrex::filterAndTransformParticles(ptile_buffer, ptile, predicate, - CopyAndTimestamp{step_scraped_index, delta_index, step, dt}, + CopyAndTimestamp{step_scraped_index, delta_index, normal_index, step, dt, idim, iside}, 0, dst_index); } } From a551793464237297f766a694579ec17f5354edec Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Wed, 6 Mar 2024 16:42:11 -0800 Subject: [PATCH 04/10] Add function to set domain boundary potentials from Python (#4740) * add function to set domain boundary potentials from Python * switch function arguments to form `potential_[lo/hi]_[x/y/z]` and add to docs --- Docs/source/usage/workflows/python_extend.rst | 6 +++++ Source/Python/WarpX.cpp | 22 +++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/Docs/source/usage/workflows/python_extend.rst b/Docs/source/usage/workflows/python_extend.rst index d3049508da6..47610e0d7ba 100644 --- a/Docs/source/usage/workflows/python_extend.rst +++ b/Docs/source/usage/workflows/python_extend.rst @@ -90,6 +90,12 @@ This object is the Python equivalent to the C++ ``WarpX`` simulation class. .. py:method:: get_particle_boundary_buffer + .. py:method:: set_potential_on_domain_boundary(potential_[lo/hi]_[x/y/z]: str) + + The potential on the domain boundaries can be modified when using the electrostatic solver. + This function updates the strings and function parsers which set the domain + boundary potentials during the Poisson solve. + .. py:method:: set_potential_on_eb(potential: str) The embedded boundary (EB) conditions can be modified when using the electrostatic solver. diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp index e42b8268fe1..4b0ad56a0ce 100644 --- a/Source/Python/WarpX.cpp +++ b/Source/Python/WarpX.cpp @@ -170,6 +170,28 @@ The physical fields in WarpX have the following naming: "Get the current physical time step size on mesh-refinement level ``lev``." ) + .def("set_potential_on_domain_boundary", + [](WarpX& wx, + std::string potential_lo_x, std::string potential_hi_x, + std::string potential_lo_y, std::string potential_hi_y, + std::string potential_lo_z, std::string potential_hi_z) + { + if (potential_lo_x != "") wx.m_poisson_boundary_handler.potential_xlo_str = potential_lo_x; + if (potential_hi_x != "") wx.m_poisson_boundary_handler.potential_xhi_str = potential_hi_x; + if (potential_lo_y != "") wx.m_poisson_boundary_handler.potential_ylo_str = potential_lo_y; + if (potential_hi_y != "") wx.m_poisson_boundary_handler.potential_yhi_str = potential_hi_y; + if (potential_lo_z != "") wx.m_poisson_boundary_handler.potential_zlo_str = potential_lo_z; + if (potential_hi_z != "") wx.m_poisson_boundary_handler.potential_zhi_str = potential_hi_z; + wx.m_poisson_boundary_handler.buildParsers(); + }, + py::arg("potential_lo_x") = "", + py::arg("potential_hi_x") = "", + py::arg("potential_lo_y") = "", + py::arg("potential_hi_y") = "", + py::arg("potential_lo_z") = "", + py::arg("potential_hi_z") = "", + "Sets the domain boundary potential string(s) and updates the function parser." 
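+             // An empty string means "leave unchanged": only non-empty arguments
+             // overwrite the stored boundary-potential strings before the parsers
+             // are rebuilt, so a caller may update a single boundary at a time.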
+ ) .def("set_potential_on_eb", [](WarpX& wx, std::string potential) { wx.m_poisson_boundary_handler.setPotentialEB(potential); From 0c6c452fc3b23e7e1b4ddc3c8293f53f98146cf4 Mon Sep 17 00:00:00 2001 From: Arianna Formenti Date: Wed, 6 Mar 2024 16:57:50 -0800 Subject: [PATCH 05/10] clean up `ablastr/fields` (#4753) * move PoissonInterpCPtoFP to Interpolate.H * concatenate nested namespaces --- Source/ablastr/fields/Interpolate.H | 55 +++++++++++++++++++++ Source/ablastr/fields/PoissonSolver.H | 36 +------------- Source/ablastr/fields/VectorPoissonSolver.H | 2 +- 3 files changed, 57 insertions(+), 36 deletions(-) create mode 100644 Source/ablastr/fields/Interpolate.H diff --git a/Source/ablastr/fields/Interpolate.H b/Source/ablastr/fields/Interpolate.H new file mode 100644 index 00000000000..a9f0a7fc75f --- /dev/null +++ b/Source/ablastr/fields/Interpolate.H @@ -0,0 +1,55 @@ +/* Copyright 2019-2022 Axel Huebl, Remi Lehe + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#ifndef ABLASTR_INTERPOLATE_H +#define ABLASTR_INTERPOLATE_H + +#include +#include +#include + +#include + +#include +#include + + +namespace ablastr::fields::details { + + /** Local interpolation from phi_cp to phi[lev+1] + * + * This is needed to work-around an NVCC limitation in downstream code (ImpactX), + * when nesting lambdas. Otherwise this could be written directly into the + * ParallelFor. + * + * @param[out] phi_fp_arr phi on the fine level + * @param[in] phi_cp_arr phi on the coarse level + * @param[in] refratio refinement ration + */ + struct PoissonInterpCPtoFP + { + PoissonInterpCPtoFP( + amrex::Array4 const phi_fp_arr, + amrex::Array4 const phi_cp_arr, + amrex::IntVect const refratio) + : m_phi_fp_arr(phi_fp_arr), m_phi_cp_arr(phi_cp_arr), m_refratio(refratio) + {} + + AMREX_GPU_DEVICE AMREX_FORCE_INLINE + void + operator() (int i, int j, int k) const noexcept + { + amrex::mf_nodebilin_interp(i, j, k, 0, m_phi_fp_arr, 0, m_phi_cp_arr, + 0, m_refratio); + } + + amrex::Array4 const m_phi_fp_arr; + amrex::Array4 const m_phi_cp_arr; + amrex::IntVect const m_refratio; + }; +} // namespace ablastr::fields::details + +#endif // ABLASTR_INTERPOLATE_H diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H index 62dccc6a09d..9c6053bed07 100644 --- a/Source/ablastr/fields/PoissonSolver.H +++ b/Source/ablastr/fields/PoissonSolver.H @@ -11,6 +11,7 @@ #include #include #include +#include #include @@ -52,41 +53,6 @@ namespace ablastr::fields { -namespace details -{ - /** Local interpolation from phi_cp to phi[lev+1] - * - * This is needed to work-around an NVCC limitation in downstream code (ImpactX), - * when nesting lambdas. Otherwise this could be written directly into the - * ParallelFor. 
- * - * @param[out] phi_fp_arr phi on the fine level - * @param[in] phi_cp_arr phi on the coarse level - * @param[in] refratio refinement ration - */ - struct PoissonInterpCPtoFP - { - PoissonInterpCPtoFP( - amrex::Array4 const phi_fp_arr, - amrex::Array4 const phi_cp_arr, - amrex::IntVect const refratio) - : m_phi_fp_arr(phi_fp_arr), m_phi_cp_arr(phi_cp_arr), m_refratio(refratio) - {} - - AMREX_GPU_DEVICE AMREX_FORCE_INLINE - void - operator() (int i, int j, int k) const noexcept - { - amrex::mf_nodebilin_interp(i, j, k, 0, m_phi_fp_arr, 0, m_phi_cp_arr, - 0, m_refratio); - } - - amrex::Array4 const m_phi_fp_arr; - amrex::Array4 const m_phi_cp_arr; - amrex::IntVect const m_refratio; - }; -} - /** Compute the potential `phi` by solving the Poisson equation * * Uses `rho` as a source, assuming that the source moves at a diff --git a/Source/ablastr/fields/VectorPoissonSolver.H b/Source/ablastr/fields/VectorPoissonSolver.H index 8f9021456eb..6e68d0fadb5 100644 --- a/Source/ablastr/fields/VectorPoissonSolver.H +++ b/Source/ablastr/fields/VectorPoissonSolver.H @@ -8,7 +8,7 @@ #define ABLASTR_VECTOR_POISSON_SOLVER_H #include -#include +#include #include #include #include From 5e0d0ec716e9f32aa9e88dd49773ff96c73ededb Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 8 Mar 2024 06:52:01 +0100 Subject: [PATCH 06/10] Split clang-tidy CI test into 4 to improve performances (#4747) * split clang-tidy checks to improve performances * rename folders and tests * fix concurrency * Simplify --------- Co-authored-by: Axel Huebl --- .github/workflows/clang_tidy.yml | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/.github/workflows/clang_tidy.yml b/.github/workflows/clang_tidy.yml index c166941f6e2..5abb42352f4 100644 --- a/.github/workflows/clang_tidy.yml +++ b/.github/workflows/clang_tidy.yml @@ -8,7 +8,10 @@ concurrency: jobs: run_clang_tidy: - name: clang-tidy + strategy: + matrix: + dim: [1, 2, RZ, 3] + name: clang-tidy-${{ matrix.dim }}D runs-on: ubuntu-22.04 if: github.event.pull_request.draft == false steps: @@ -35,16 +38,16 @@ jobs: export CXX=$(which clang++-15) export CC=$(which clang-15) - cmake -S . -B build_clang_tidy \ - -DCMAKE_VERBOSE_MAKEFILE=ON \ - -DWarpX_DIMS="1;2;RZ;3" \ - -DWarpX_MPI=ON \ - -DWarpX_COMPUTE=OMP \ - -DWarpX_PSATD=ON \ - -DWarpX_QED=ON \ - -DWarpX_QED_TABLE_GEN=ON \ - -DWarpX_OPENPMD=ON \ - -DWarpX_PRECISION=SINGLE \ + cmake -S . -B build_clang_tidy \ + -DCMAKE_VERBOSE_MAKEFILE=ON \ + -DWarpX_DIMS="${{ matrix.dim }}" \ + -DWarpX_MPI=ON \ + -DWarpX_COMPUTE=OMP \ + -DWarpX_PSATD=ON \ + -DWarpX_QED=ON \ + -DWarpX_QED_TABLE_GEN=ON \ + -DWarpX_OPENPMD=ON \ + -DWarpX_PRECISION=SINGLE \ -DCMAKE_CXX_COMPILER_LAUNCHER=ccache cmake --build build_clang_tidy -j 4 From 510f016771fbfdad49d50277dfd5ec57f3ffc230 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Fri, 8 Mar 2024 01:16:54 -0800 Subject: [PATCH 07/10] Replace links to learn git (#4758) * Replace links to learn git --- CONTRIBUTING.rst | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index a3a6a3046a9..77b8200b0d5 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -10,10 +10,7 @@ Git workflow ------------ The WarpX project uses `git `_ for version control. -If you are new to git, you can follow one of these tutorials: - -- `Learn git with bitbucket `_ -- `git - the simple guide `_ +If you are new to git, you can follow `this tutorial `__. 
Configure your GitHub Account & Development Machine ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ From 887a16733611f088ebeb8821304f1735a5a05405 Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Fri, 8 Mar 2024 06:00:53 -0800 Subject: [PATCH 08/10] Bugfix in `fields.py` for GPU run without `cupy` (#4750) * Bugfix in `fields.py` for GPU run without `cupy` * apply suggestion from code review --- Python/pywarpx/fields.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py index dc0de9d9491..0f680595ef4 100644 --- a/Python/pywarpx/fields.py +++ b/Python/pywarpx/fields.py @@ -47,6 +47,7 @@ import cupy as cp except ImportError: cp = None +from .LoadThirdParty import load_cupy try: from mpi4py import MPI as mpi @@ -516,6 +517,12 @@ def __setitem__(self, index, value): if not isinstance(ic , slice) or len(global_shape) < 4: global_shape[3:3] = [1] value3d.shape = global_shape + if libwarpx.libwarpx_so.Config.have_gpu: + # check if cupy is available for use + xp, cupy_status = load_cupy() + if cupy_status is not None: + libwarpx.amr.Print(cupy_status) + starts = [ixstart, iystart, izstart] stops = [ixstop, iystop, izstop] for mfi in self.mf: @@ -526,7 +533,7 @@ def __setitem__(self, index, value): slice_value = value3d[global_slices] if libwarpx.libwarpx_so.Config.have_gpu: # Copy data from host to device - slice_value = cp.asarray(slice_value) + slice_value = xp.asarray(slice_value) mf_arr[block_slices] = slice_value else: mf_arr[block_slices] = value From ebbe63496274f1a65a1dae3c2daf56bf1d6c1d87 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Sat, 9 Mar 2024 01:06:24 +0900 Subject: [PATCH 09/10] Release 24.03 (#4759) * AMReX: 24.03 * pyAMReX: 24.03 * WarpX: 24.03 --- .github/workflows/cuda.yml | 2 +- CMakeLists.txt | 2 +- Docs/source/conf.py | 4 ++-- Python/setup.py | 2 +- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- cmake/dependencies/AMReX.cmake | 4 ++-- cmake/dependencies/pyAMReX.cmake | 4 ++-- run_test.sh | 2 +- setup.py | 2 +- 10 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 4b23ef931db..5e3a3077f5d 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -115,7 +115,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 3525b4a3f27eb64f746dd69b6613f71bb02d6e63 && cd - + cd ../amrex && git checkout --detach 24.03 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/CMakeLists.txt b/CMakeLists.txt index 3a947b01dcd..4ddc685c733 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,7 @@ # Preamble #################################################################### # cmake_minimum_required(VERSION 3.20.0) -project(WarpX VERSION 24.02) +project(WarpX VERSION 24.03) include(${WarpX_SOURCE_DIR}/cmake/WarpXFunctions.cmake) diff --git a/Docs/source/conf.py b/Docs/source/conf.py index b34c437b829..1a439c8b76e 100644 --- a/Docs/source/conf.py +++ b/Docs/source/conf.py @@ -103,9 +103,9 @@ def __init__(self, *args, **kwargs): # built documents. # # The short X.Y version. -version = u'24.02' +version = u'24.03' # The full version, including alpha/beta/rc tags. -release = u'24.02' +release = u'24.03' # The language for content autogenerated by Sphinx. 
Refer to documentation # for a list of supported languages. diff --git a/Python/setup.py b/Python/setup.py index 3c6c0f605c0..08e616cc279 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -54,7 +54,7 @@ package_data = {} setup(name = 'pywarpx', - version = '24.02', + version = '24.03', packages = ['pywarpx'], package_dir = {'pywarpx': 'pywarpx'}, description = """Wrapper of WarpX""", diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index 40338dc1480..2f0c7b607bb 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = 3525b4a3f27eb64f746dd69b6613f71bb02d6e63 +branch = 24.03 [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index ab422d5631a..1f88db26d7a 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = 3525b4a3f27eb64f746dd69b6613f71bb02d6e63 +branch = 24.03 [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 5b225de6d50..a1045457ec0 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -250,7 +250,7 @@ macro(find_amrex) endif() set(COMPONENT_PRECISION ${WarpX_PRECISION} P${WarpX_PARTICLE_PRECISION}) - find_package(AMReX 24.02 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_DIMS} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) + find_package(AMReX 24.03 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_DIMS} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) # note: TINYP skipped because user-configured and optional # AMReX CMake helper scripts @@ -273,7 +273,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "3525b4a3f27eb64f746dd69b6613f71bb02d6e63" +set(WarpX_amrex_branch "24.03" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index e8748f2e920..6360da84276 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -64,7 +64,7 @@ function(find_pyamrex) endif() elseif(NOT WarpX_pyamrex_internal) # TODO: MPI control - find_package(pyAMReX 24.02 CONFIG REQUIRED) + find_package(pyAMReX 24.03 CONFIG REQUIRED) message(STATUS "pyAMReX: Found version '${pyAMReX_VERSION}'") endif() endfunction() @@ -79,7 +79,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "bfb599fd8361f8ef0765c487fd7bb69409bf78af" +set(WarpX_pyamrex_branch "24.03" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") diff --git a/run_test.sh b/run_test.sh index b5cdc627e35..f556dd37e76 100755 --- a/run_test.sh +++ b/run_test.sh @@ -68,7 +68,7 @@ python3 -m pip install --upgrade -r warpx/Regression/requirements.txt # Clone AMReX and warpx-data git 
clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach 3525b4a3f27eb64f746dd69b6613f71bb02d6e63 && cd - +cd amrex && git checkout --detach 24.03 && cd - # warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git # openPMD-example-datasets contains various required data sets diff --git a/setup.py b/setup.py index 197a39ce23f..b0f919d707a 100644 --- a/setup.py +++ b/setup.py @@ -278,7 +278,7 @@ def build_extension(self, ext): setup( name='pywarpx', # note PEP-440 syntax: x.y.zaN but x.y.z.devN - version = '24.02', + version = '24.03', packages = ['pywarpx'], package_dir = {'pywarpx': 'Python/pywarpx'}, author='Jean-Luc Vay, David P. Grote, Maxence Thévenet, Rémi Lehe, Andrew Myers, Weiqun Zhang, Axel Huebl, et al.', From e38acc4da155087b3cdccb502fc808639c0c5c30 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Fri, 8 Mar 2024 18:12:04 -0800 Subject: [PATCH 10/10] Implement stair-case Yee solver with EB in RZ geometry (#2707) * Allow compilation with RZ EB * Do not push cells for RZ Yee solver, when covered with EB * Fix compilation errors * Fix additional compilation errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix additional compilation errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add automated test * Add automated test * Fix path in tests * Enable parser in RZ * Update example script * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Clean-up PR * Initialize EB quantities * Modified EM field initialization in 2D with EB * Typo fix * Typo fix * Ignoring unused variables correctly * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Correct condition for updating E * Correct update of B * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update B push * Update input script * Revert "Update input script" This reverts commit 5087485db606f77806c08849c51af9be635ca622. * Update initialization * Updated test * Move test to a different folder * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add test for WarpX-test.ini * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix path for tests * Update test description * Update test metadata * Add checksum file * Revert changes * Revert changes * Change lx to lr * Revert "Change lx to lr" This reverts commit be3039af229d5dee509c8de0a712addf099a2b82. 
* Change lx to lr --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: lgiacome --- .../analysis_fields.py | 41 ++++++++++++++++++ .../embedded_boundary_diffraction/inputs_rz | 42 +++++++++++++++++++ .../EmbeddedBoundaryDiffraction.json | 10 +++++ Regression/WarpX-tests.ini | 18 ++++++++ Source/EmbeddedBoundary/WarpXInitEB.cpp | 16 +++---- .../FiniteDifferenceSolver/EvolveB.cpp | 4 +- .../FiniteDifferenceSolver/EvolveE.cpp | 27 ++++++++++-- .../FiniteDifferenceSolver.H | 1 + Source/WarpX.cpp | 7 ---- 9 files changed, 144 insertions(+), 22 deletions(-) create mode 100755 Examples/Tests/embedded_boundary_diffraction/analysis_fields.py create mode 100644 Examples/Tests/embedded_boundary_diffraction/inputs_rz create mode 100644 Regression/Checksum/benchmarks_json/EmbeddedBoundaryDiffraction.json diff --git a/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py b/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py new file mode 100755 index 00000000000..da344f332a1 --- /dev/null +++ b/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 +""" +This test checks the implementation of the embedded boundary in cylindrical geometry, +by checking the diffraction of a laser by an embedded boundary here. +We then check that the first minimum of the diffracted intensity pattern +occurs along the angle given by the theoretical Airy pattern, i.e. +theta_diffraction = 1.22 * lambda / d +""" +import os +import sys + +import numpy as np +from openpmd_viewer import OpenPMDTimeSeries +from scipy.ndimage import gaussian_filter1d + +sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +import checksumAPI + +ts = OpenPMDTimeSeries('./EmbeddedBoundaryDiffraction_plt/') + +# Extract the intensity as a function of r and z +Ex, info = ts.get_field('E', 'x', iteration=300) +I = gaussian_filter1d(Ex**2, sigma=5, axis=0) # Extract intensity by averaging E^2 over wavelength +irmax = np.argmax( I, axis=-1) + +# Find the radius of the first minimum, as a function of z +def r_first_minimum(iz): + ir = len(info.r)//2 + while I[iz, ir+1] < I[iz, ir]: + ir += 1 + return info.r[ir] +r = np.array([ r_first_minimum(iz) for iz in range(len(info.z)) ]) + +# Check that this corresponds to the prediction from the Airy pattern +theta_diffraction = np.arcsin(1.22*0.1/0.4)/2 +assert np.all( abs(r[50:] - theta_diffraction*info.z[50:]) < 0.03 ) + +# Open the right plot file +filename = sys.argv[1] +test_name = os.path.split(os.getcwd())[1] +checksumAPI.evaluate_checksum(test_name, filename, output_format='openpmd') diff --git a/Examples/Tests/embedded_boundary_diffraction/inputs_rz b/Examples/Tests/embedded_boundary_diffraction/inputs_rz new file mode 100644 index 00000000000..705c9641dfb --- /dev/null +++ b/Examples/Tests/embedded_boundary_diffraction/inputs_rz @@ -0,0 +1,42 @@ +# This script tests the diffraction of a laser by +# a cylindrical object, represented by an embedded boundary here + +max_step = 300 +amr.n_cell = 128 256 +amr.max_grid_size = 256 +amr.max_level = 0 + +geometry.dims = RZ +geometry.prob_lo = 0. 
-0.2 +geometry.prob_hi = 2 1.4 +warpx.cfl = 0.99 +algo.particle_shape = 1 + +boundary.field_lo = none absorbing_silver_mueller +boundary.field_hi = pec absorbing_silver_mueller + +# Make the cylindrical object that the laser will diffract on +my_constants.aperture_l = 0.01 +my_constants.aperture_r = 0.2 +warpx.eb_implicit_function = "if( (abs(z)<0.5*aperture_l) and (x, 3 >& ed void WarpX::ComputeFaceAreas (std::array< std::unique_ptr, 3 >& face_areas, const amrex::EBFArrayBoxFactory& eb_fact) { -#ifndef WARPX_DIM_RZ BL_PROFILE("ComputeFaceAreas"); auto const &flags = eb_fact.getMultiEBCellFlagFab(); -#ifdef WARPX_DIM_XZ +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) //In 2D the volume frac is actually the area frac. auto const &area_frac = eb_fact.getVolFrac(); #elif defined(WARPX_DIM_3D) @@ -194,12 +193,12 @@ WarpX::ComputeFaceAreas (std::array< std::unique_ptr, 3 >& face "ComputeFaceAreas: Only implemented in 2D3V and 3D3V"); #endif -#ifdef WARPX_DIM_XZ +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) face_areas[0]->setVal(0.); face_areas[2]->setVal(0.); #endif for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi) { -#ifdef WARPX_DIM_XZ +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) // In 2D we change the extrema of the for loop so that we only have the case idim=1 for (int idim = 1; idim < AMREX_SPACEDIM; ++idim) { #elif defined(WARPX_DIM_3D) @@ -223,7 +222,7 @@ WarpX::ComputeFaceAreas (std::array< std::unique_ptr, 3 >& face face_areas_dim(i, j, k) = amrex::Real(0.); }); } else { -#ifdef WARPX_DIM_XZ +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) auto const &face = area_frac.const_array(mfi); #elif defined(WARPX_DIM_3D) auto const &face = area_frac[idim]->const_array(mfi); @@ -237,7 +236,6 @@ WarpX::ComputeFaceAreas (std::array< std::unique_ptr, 3 >& face } } } -#endif } @@ -269,13 +267,12 @@ WarpX::ScaleEdges (std::array< std::unique_ptr, 3 >& edge_lengt void WarpX::ScaleAreas(std::array< std::unique_ptr, 3 >& face_areas, const std::array& cell_size) { -#ifndef WARPX_DIM_RZ BL_PROFILE("ScaleAreas"); amrex::Real full_area; for (amrex::MFIter mfi(*face_areas[0]); mfi.isValid(); ++mfi) { -#ifdef WARPX_DIM_XZ +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) // In 2D we change the extrema of the for loop so that we only have the case idim=1 for (int idim = 1; idim < AMREX_SPACEDIM; ++idim) { #elif defined(WARPX_DIM_3D) @@ -286,7 +283,7 @@ WarpX::ScaleAreas(std::array< std::unique_ptr, 3 >& face_areas, #endif const amrex::Box& box = mfi.tilebox(face_areas[idim]->ixType().toIntVect(), face_areas[idim]->nGrowVect() ); -#ifdef WARPX_DIM_XZ +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) full_area = cell_size[0]*cell_size[2]; #elif defined(WARPX_DIM_3D) if (idim == 0) { @@ -308,7 +305,6 @@ WarpX::ScaleAreas(std::array< std::unique_ptr, 3 >& face_areas, } } -#endif } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp index 4425e655917..fbc1397b413 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp @@ -59,8 +59,8 @@ void FiniteDifferenceSolver::EvolveB ( std::array< std::unique_ptr >, 3 >& borrowing, int lev, amrex::Real const dt ) { -#ifndef AMREX_USE_EB - amrex::ignore_unused(area_mod, ECTRhofield, Venl, flag_info_cell, borrowing); +#if defined(WARPX_DIM_RZ) || !defined(AMREX_USE_EB) + amrex::ignore_unused(area_mod, ECTRhofield, Venl, flag_info_cell, borrowing); #endif // Select algorithm (The choice of algorithm is a runtime 
option, diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp index e52cca846bb..5f707fbc927 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp @@ -67,8 +67,7 @@ void FiniteDifferenceSolver::EvolveE ( // but we compile code for each algorithm, using templates) #ifdef WARPX_DIM_RZ if (m_fdtd_algo == ElectromagneticSolverAlgo::Yee){ - ignore_unused(edge_lengths); - EvolveECylindrical ( Efield, Bfield, Jfield, Ffield, lev, dt ); + EvolveECylindrical ( Efield, Bfield, Jfield, edge_lengths, Ffield, lev, dt ); #else if (m_grid_type == GridType::Collocated) { @@ -181,7 +180,6 @@ void FiniteDifferenceSolver::EvolveECartesian ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ - #ifdef AMREX_USE_EB // Skip field push if this cell is fully covered by embedded boundaries if (lz(i,j,k) <= 0) return; @@ -235,9 +233,14 @@ void FiniteDifferenceSolver::EvolveECylindrical ( std::array< std::unique_ptr, 3 >& Efield, std::array< std::unique_ptr, 3 > const& Bfield, std::array< std::unique_ptr, 3 > const& Jfield, + std::array< std::unique_ptr, 3 > const& edge_lengths, std::unique_ptr const& Ffield, int lev, amrex::Real const dt ) { +#ifndef AMREX_USE_EB + amrex::ignore_unused(edge_lengths); +#endif + amrex::LayoutData* cost = WarpX::getCosts(lev); // Loop through the grids, and over the tiles within each grid @@ -262,6 +265,11 @@ void FiniteDifferenceSolver::EvolveECylindrical ( Array4 const& jt = Jfield[1]->array(mfi); Array4 const& jz = Jfield[2]->array(mfi); +#ifdef AMREX_USE_EB + amrex::Array4 const& lr = edge_lengths[0]->array(mfi); + amrex::Array4 const& lz = edge_lengths[2]->array(mfi); +#endif + // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_r = m_stencil_coefs_r.dataPtr(); auto const n_coefs_r = static_cast(m_stencil_coefs_r.size()); @@ -284,6 +292,10 @@ void FiniteDifferenceSolver::EvolveECylindrical ( amrex::ParallelFor(ter, tet, tez, [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ +#ifdef AMREX_USE_EB + // Skip field push if this cell is fully covered by embedded boundaries + if (lr(i, j, 0) <= 0) return; +#endif Real const r = rmin + (i + 0.5_rt)*dr; // r on cell-centered point (Er is cell-centered in r) Er(i, j, 0, 0) += c2 * dt*( - T_Algo::DownwardDz(Bt, coefs_z, n_coefs_z, i, j, 0, 0) @@ -301,6 +313,11 @@ void FiniteDifferenceSolver::EvolveECylindrical ( }, [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ +#ifdef AMREX_USE_EB + // Skip field push if this cell is fully covered by embedded boundaries + // The Et field is at a node, so we need to check if the node is covered + if (lr(i, j, 0)<=0 || lr(i-1, j, 0)<=0 || lz(i, j-1, 0)<=0 || lz(i, j, 0)<=0) return; +#endif Real const r = rmin + i*dr; // r on a nodal grid (Et is nodal in r) if (r != 0) { // Off-axis, regular Maxwell equations Et(i, j, 0, 0) += c2 * dt*( @@ -342,6 +359,10 @@ void FiniteDifferenceSolver::EvolveECylindrical ( }, [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ +#ifdef AMREX_USE_EB + // Skip field push if this cell is fully covered by embedded boundaries + if (lz(i, j, 0) <= 0) return; +#endif Real const r = rmin + i*dr; // r on a nodal grid (Ez is nodal in r) if (r != 0) { // Off-axis, regular Maxwell equations Ez(i, j, 0, 0) += c2 * dt*( diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index 861b2648c1e..d045a30dd44 100644 --- 
a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -213,6 +213,7 @@ class FiniteDifferenceSolver std::array< std::unique_ptr, 3 >& Efield, std::array< std::unique_ptr, 3 > const& Bfield, std::array< std::unique_ptr, 3 > const& Jfield, + std::array< std::unique_ptr, 3 > const& edge_lengths, std::unique_ptr const& Ffield, int lev, amrex::Real dt ); diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index d453fa6f941..6c3186c5c8d 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -750,13 +750,6 @@ WarpX::ReadParameters () electromagnetic_solver_id = ElectromagneticSolverAlgo::None; } -#if defined(AMREX_USE_EB) && defined(WARPX_DIM_RZ) - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - electromagnetic_solver_id==ElectromagneticSolverAlgo::None - || electromagnetic_solver_id==ElectromagneticSolverAlgo::HybridPIC, - "Currently, the embedded boundary in RZ only works for electrostatic solvers, the Ohm's law solver or with no solver installed."); -#endif - if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame || electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) {
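
A minimal numeric sketch of the Airy-pattern acceptance criterion used in
analysis_fields.py above, for reference (illustrative only, not part of the
patch series; the wavelength of 0.1 m is the value hard-coded in that script,
and the aperture diameter is twice aperture_r = 0.2 m from inputs_rz):

import numpy as np

lam = 0.1    # laser wavelength (m), as hard-coded in analysis_fields.py
d = 2 * 0.2  # aperture diameter (m): twice aperture_r from inputs_rz

# First minimum of the Airy diffraction pattern: sin(theta) = 1.22*lambda/d.
# The factor 1/2 mirrors the convention used in the analysis script.
theta = np.arcsin(1.22 * lam / d) / 2
print(f"expected first-minimum angle: {theta:.4f} rad")  # ~0.1550 rad

# analysis_fields.py then asserts |r_first_minimum(z) - theta*z| < 0.03
# for z sufficiently far behind the aperture.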