
Cmake option (#98)
* add cmake option for pulsar

* remove unused var

* add CI compilation

* eol

* explicit amrex space, use literals in namespace
RevathiJambunathan authored May 19, 2022
1 parent 18a41d9 commit e68d066
Showing 7 changed files with 79 additions and 28 deletions.
30 changes: 30 additions & 0 deletions .github/workflows/cuda.yml
@@ -76,6 +76,21 @@ jobs:
PYWARPX_LIB_DIR=$PWD/build_sp/lib python3 -m pip wheel .
python3 -m pip install *.whl
cmake -S . -B build_pulsar \
-DCMAKE_VERBOSE_MAKEFILE=ON \
-DWarpX_COMPUTE=CUDA \
-DWarpX_EB=OFF \
-DWarpX_LIB=OFF \
-DAMReX_CUDA_ARCH=6.0 \
-DWarpX_OPENPMD=OFF \
-DWarpX_openpmd_internal=OFF \
-DWarpX_PRECISION=DOUBLE \
-DWarpX_PSATD=OFF \
-DPULSAR=ON \
-DAMReX_CUDA_ERROR_CROSS_EXECUTION_SPACE_CALL=ON \
-DAMReX_CUDA_ERROR_CAPTURE_THIS=ON
cmake --build build_sp -j 2
# make sure legacy build system continues to build, i.e., that we don't forget
# to add new .cpp files
build_nvcc_gnumake:
@@ -108,6 +123,7 @@ jobs:
git clone https://github.com/AMReX-Codes/amrex.git ../amrex
cd amrex && git checkout --detach 22.05 && cd -
make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2
make COMP=gcc QED=FALSE USE_OPENPMD=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=FALSE PULSAR=TRUE USE_CCACHE=TRUE -j 2
build_nvhpc21-11-nvcc:
name: [email protected] NVCC/NVC++ Release [tests]
@@ -165,6 +181,20 @@ jobs:
-DAMReX_CUDA_ERROR_CAPTURE_THIS=ON
cmake --build build -j 2
cmake -S . -B build_pulsar \
-DCMAKE_VERBOSE_MAKEFILE=ON \
-DWarpX_COMPUTE=CUDA \
-DWarpX_EB=OFF \
-DWarpX_LIB=ON \
-DAMReX_CUDA_ARCH=8.0 \
-DWarpX_OPENPMD=OFF \
-DWarpX_PSATD=OFF \
-DPULSAR=ON \
-DAMReX_CUDA_ERROR_CROSS_EXECUTION_SPACE_CALL=ON \
-DAMReX_CUDA_ERROR_CAPTURE_THIS=ON
cmake --build build -j 2
# work-around for mpi4py 3.1.1 build system issue with using
# a GNU-built Python executable with non-GNU Python modules
# https://github.com/mpi4py/mpi4py/issues/114
7 changes: 7 additions & 0 deletions .github/workflows/ubuntu.yml
@@ -38,6 +38,13 @@ jobs:
-DWarpX_MPI=OFF \
-DWarpX_QED=OFF
cmake --build build_3D -j 2
cmake -S . -B build_3D_PULSAR \
-DCMAKE_VERBOSE_MAKEFILE=ON \
-DWarpX_EB=OFF \
-DWarpX_MPI=OFF \
-DWarpX_QED=OFF \
-DPULSAR=ON
cmake --build build_3D_PULSAR -j 2
cmake -S . -B build_RZ \
-DCMAKE_VERBOSE_MAKEFILE=ON \
-DWarpX_DIMS=RZ \
9 changes: 9 additions & 0 deletions CMakeLists.txt
@@ -71,6 +71,7 @@ option(WarpX_PSATD "spectral solver support" OFF)
option(WarpX_SENSEI "SENSEI in situ diagnostics" OFF)
option(WarpX_QED "QED support (requires PICSAR)" ON)
option(WarpX_QED_TABLE_GEN "QED table generation (requires PICSAR and Boost)" OFF)
option(PULSAR "pulsar simulations support" OFF)

set(WarpX_DIMS_VALUES 1 2 3 RZ)
set(WarpX_DIMS 3 CACHE STRING "Simulation dimensionality (1/2/3/RZ)")
@@ -249,6 +250,10 @@ if(WarpX_QED)
target_link_libraries(ablastr PUBLIC PXRMP_QED::PXRMP_QED)
endif()

if(PULSAR)
target_compile_definitions(WarpX PUBLIC PULSAR)
endif()

# AMReX helper function: propagate CUDA specific target & source properties
if(WarpX_COMPUTE STREQUAL CUDA)
foreach(warpx_tgt IN LISTS _ALL_TARGETS)
@@ -302,6 +307,10 @@ if(WarpX_QED)
endif()
endif()

if(PULSAR)
target_compile_definitions(WarpX PUBLIC PULSAR)
endif()

if(WarpX_PSATD)
target_compile_definitions(ablastr PUBLIC WARPX_USE_PSATD)
endif()
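For context, a minimal stand-alone sketch of how the PULSAR compile definition set above can be consumed from C++ (illustrative only; this program and its messages are not part of the commit):

#include <iostream>

int main ()
{
#ifdef PULSAR
    // Compiled in only when CMake is configured with -DPULSAR=ON, which adds
    // the PULSAR definition via target_compile_definitions() as shown above.
    std::cout << "pulsar support enabled\n";
#else
    std::cout << "pulsar support disabled\n";
#endif
    return 0;
}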
1 change: 1 addition & 0 deletions Source/Particles/CMakeLists.txt
@@ -8,6 +8,7 @@ target_sources(WarpX
WarpXParticleContainer.cpp
LaserParticleContainer.cpp
ParticleBoundaryBuffer.cpp
PulsarParameters.cpp
)

#add_subdirectory(Algorithms)
2 changes: 1 addition & 1 deletion Source/Particles/PulsarParameters.H
@@ -12,7 +12,7 @@
#include <AMReX_Parser.H>
#include <list>

using namespace amrex;
using namespace amrex::literals;


class Pulsar {
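The header change above narrows the blanket using-directive to amrex::literals. A small sketch of what that still permits (assumes AMReX's AMReX_REAL.H; not taken from this commit):

#include <AMReX_REAL.H>

using namespace amrex::literals;   // brings in the literal suffixes (e.g. _rt) only

amrex::Real half_of (amrex::Real x)
{
    // _rt literals remain available, but types such as amrex::Real now need
    // their explicit amrex:: prefix, as applied throughout the .cpp diff below.
    return 0.5_rt * x;
}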
53 changes: 26 additions & 27 deletions Source/Particles/PulsarParameters.cpp
@@ -408,7 +408,7 @@ Pulsar::InitializeConductorMultifabUsingParser(
const amrex::GpuArray<amrex::Real, AMREX_SPACEDIM> dx_lev = warpx.Geom(lev).CellSizeArray();
const amrex::RealBox& real_box = warpx.Geom(lev).ProbDomain();
amrex::IntVect iv = mf->ixType().toIntVect();
for ( amrex::MFIter mfi(*mf, TilingIfNotGPU()); mfi.isValid(); ++mfi) {
for ( amrex::MFIter mfi(*mf, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) {
//InitializeGhost Cells also
const amrex::Box& tb = mfi.tilebox(iv, mf->nGrowVect());
amrex::Array4<amrex::Real> const& conductor_fab = mf->array(mfi);
@@ -448,10 +448,10 @@ Pulsar::InitializeExternalPulsarFieldsOnGrid ( amrex::MultiFab *mfx, amrex::Mult
amrex::IntVect x_nodal_flag = mfx->ixType().toIntVect();
amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect();
amrex::IntVect z_nodal_flag = mfz->ixType().toIntVect();
GpuArray<int, 3> x_IndexType;
GpuArray<int, 3> y_IndexType;
GpuArray<int, 3> z_IndexType;
GpuArray<amrex::Real, AMREX_SPACEDIM> center_star_arr;
amrex::GpuArray<int, 3> x_IndexType;
amrex::GpuArray<int, 3> y_IndexType;
amrex::GpuArray<int, 3> z_IndexType;
amrex::GpuArray<amrex::Real, AMREX_SPACEDIM> center_star_arr;
for (int idim = 0; idim < 3; ++idim) {
x_IndexType[idim] = x_nodal_flag[idim];
y_IndexType[idim] = y_nodal_flag[idim];
@@ -471,7 +471,7 @@
#ifdef AMREX_USE_OMP
#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
#endif
for (MFIter mfi(*mfx, TilingIfNotGPU()); mfi.isValid(); ++mfi)
for (amrex::MFIter mfi(*mfx, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
const amrex::Box& tbx = mfi.tilebox(x_nodal_flag, mfx->nGrowVect() );
const amrex::Box& tby = mfi.tilebox(y_nodal_flag, mfy->nGrowVect() );
@@ -658,10 +658,10 @@ Pulsar::ApplyCorotatingEfield_BC ( std::array< std::unique_ptr<amrex::MultiFab>,
amrex::IntVect x_nodal_flag = Efield[0]->ixType().toIntVect();
amrex::IntVect y_nodal_flag = Efield[1]->ixType().toIntVect();
amrex::IntVect z_nodal_flag = Efield[2]->ixType().toIntVect();
GpuArray<int, 3> x_IndexType;
GpuArray<int, 3> y_IndexType;
GpuArray<int, 3> z_IndexType;
GpuArray<amrex::Real, AMREX_SPACEDIM> center_star_arr;
amrex::GpuArray<int, 3> x_IndexType;
amrex::GpuArray<int, 3> y_IndexType;
amrex::GpuArray<int, 3> z_IndexType;
amrex::GpuArray<amrex::Real, AMREX_SPACEDIM> center_star_arr;
for (int idim = 0; idim < 3; ++idim) {
x_IndexType[idim] = x_nodal_flag[idim];
y_IndexType[idim] = y_nodal_flag[idim];
@@ -679,7 +679,7 @@
#ifdef AMREX_USE_OMP
#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
#endif
for (MFIter mfi(*Efield[0], TilingIfNotGPU()); mfi.isValid(); ++mfi)
for (amrex::MFIter mfi(*Efield[0], amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
const amrex::Box& tex = mfi.tilebox(x_nodal_flag);
const amrex::Box& tey = mfi.tilebox(y_nodal_flag);
@@ -811,10 +811,10 @@ Pulsar::ApplyDipoleBfield_BC ( std::array< std::unique_ptr<amrex::MultiFab>, 3>
amrex::IntVect x_nodal_flag = Bfield[0]->ixType().toIntVect();
amrex::IntVect y_nodal_flag = Bfield[1]->ixType().toIntVect();
amrex::IntVect z_nodal_flag = Bfield[2]->ixType().toIntVect();
GpuArray<int, 3> x_IndexType;
GpuArray<int, 3> y_IndexType;
GpuArray<int, 3> z_IndexType;
GpuArray<amrex::Real, AMREX_SPACEDIM> center_star_arr;
amrex::GpuArray<int, 3> x_IndexType;
amrex::GpuArray<int, 3> y_IndexType;
amrex::GpuArray<int, 3> z_IndexType;
amrex::GpuArray<amrex::Real, AMREX_SPACEDIM> center_star_arr;
for (int idim = 0; idim < 3; ++idim) {
x_IndexType[idim] = x_nodal_flag[idim];
y_IndexType[idim] = y_nodal_flag[idim];
@@ -832,7 +832,7 @@
#ifdef AMREX_USE_OMP
#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
#endif
for (MFIter mfi(*Bfield[0], TilingIfNotGPU()); mfi.isValid(); ++mfi)
for (amrex::MFIter mfi(*Bfield[0], amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
const amrex::Box& tbx = mfi.tilebox(x_nodal_flag);
const amrex::Box& tby = mfi.tilebox(y_nodal_flag);
@@ -1013,7 +1013,7 @@ Pulsar::SetTangentialEforInternalConductor( std::array <std::unique_ptr<amrex::M
#ifdef AMREX_USE_OMP
#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
#endif
for (MFIter mfi(*Efield[0], TilingIfNotGPU()); mfi.isValid(); ++mfi)
for (amrex::MFIter mfi(*Efield[0], amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
const amrex::Box& tex = mfi.tilebox(x_nodal_flag);
const amrex::Box& tey = mfi.tilebox(y_nodal_flag);
@@ -1093,7 +1093,7 @@ Pulsar::ComputePlasmaNumberDensity ()
}


const Geometry& geom = warpx.Geom(lev);
const amrex::Geometry& geom = warpx.Geom(lev);
const auto dx = geom.CellSizeArray();
#if defined WARPX_DIM_3D
amrex::Real inv_vol = 1._rt/(dx[0] * dx[1] * dx[2]);
@@ -1104,7 +1104,7 @@
#ifdef AMREX_USE_OMP
#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
#endif
for ( MFIter mfi(*m_plasma_number_density[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi ) {
for ( amrex::MFIter mfi(*m_plasma_number_density[lev], amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi ) {
amrex::Array4<amrex::Real> const& density = m_plasma_number_density[0]->array(mfi);
amrex::Box const& tbx = mfi.tilebox();
const int ncomps = m_plasma_number_density[lev]->nComp();
@@ -1132,7 +1132,7 @@ Pulsar::ComputePlasmaMagnetization ()
const amrex::MultiFab& Bx_mf = warpx.getBfield(lev, 0);
const amrex::MultiFab& By_mf = warpx.getBfield(lev, 1);
const amrex::MultiFab& Bz_mf = warpx.getBfield(lev, 2);
for (MFIter mfi(*m_magnetization[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi)
for (amrex::MFIter mfi(*m_magnetization[lev], amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
const amrex::Box& bx = mfi.tilebox();
amrex::Array4<amrex::Real> const& mag = m_magnetization[lev]->array(mfi);
@@ -1191,7 +1191,7 @@ Pulsar::TuneSigma0Threshold (const int step)
for (int isp = 0; isp < nspecies; ++isp) {
amrex::Real ws_total = 0._rt;
auto& pc = warpx.GetPartContainer().GetParticleContainer(isp);
amrex::ReduceOps<ReduceOpSum> reduce_ops;
amrex::ReduceOps<amrex::ReduceOpSum> reduce_ops;
amrex::Real cur_time = warpx.gett_new(0);
auto ws_r = amrex::ParticleReduce<
amrex::ReduceData < amrex::ParticleReal> >
@@ -1292,7 +1292,7 @@ Pulsar::TotalParticles ()
for (int isp = 0; isp < nspecies; ++isp) {
auto& pc = warpx.GetPartContainer().GetParticleContainer(isp);
amrex::Long np_total = pc.TotalNumberOfParticles();
amrex::ParallelDescriptor::ReduceLongSum(np_total, ParallelDescriptor::IOProcessorNumber());
amrex::ParallelDescriptor::ReduceLongSum(np_total);
total_particles += np_total;
}
}
@@ -1308,7 +1308,6 @@ Pulsar::PrintInjectedCellValues ()
{
auto& warpx = WarpX::GetInstance();
std::vector species_names = warpx.GetPartContainer().GetSpeciesNames();
const int nspecies = species_names.size();
int total_injected_cells = static_cast<int>(SumInjectionFlag());
// x, y, z, r, theta, phi, injection_flag, magnetization, ndens_p, ndens_e, Bx, By, Bz, Bmag, rho
int total_diags = 15;
@@ -1323,7 +1322,7 @@
const amrex::MultiFab& injectionflag_mf = *m_injection_flag[lev];
const amrex::MultiFab& magnetization_mf = *m_magnetization[lev];
const amrex::MultiFab& ndens_mf = *m_plasma_number_density[lev];
GpuArray<amrex::Real, AMREX_SPACEDIM> center_star_arr;
amrex::GpuArray<amrex::Real, AMREX_SPACEDIM> center_star_arr;
for (int idim = 0; idim < 3; ++idim) {
center_star_arr[idim] = m_center_star[idim];
}
@@ -1332,9 +1331,9 @@
rho = mypc.GetChargeDensity(lev, true);
amrex::MultiFab & rho_mf = *rho;

Gpu::DeviceScalar<int> cell_counter(0);
amrex::Gpu::DeviceScalar<int> cell_counter(0);
int* cell_counter_d = cell_counter.dataPtr();
for (MFIter mfi(injectionflag_mf, TilingIfNotGPU()); mfi.isValid(); ++mfi)
for (amrex::MFIter mfi(injectionflag_mf, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
const amrex::Box & bx = mfi.tilebox();
amrex::Array4<const amrex::Real> const& Bx = Bx_mf[mfi].array();
@@ -1386,7 +1385,7 @@ Pulsar::PrintInjectedCellValues ()
}
amrex::Print() << " counter : " << cell_counter.dataValue() << " total cells injected " << total_injected_cells << "\n";
std::stringstream ss;
ss << Concatenate("InjectionCellData", warpx.getistep(0), 5);
ss << amrex::Concatenate("InjectionCellData", warpx.getistep(0), 5);
amrex::AllPrintToFile(ss.str()) << " cell_index x y z r theta phi injection magnetization ndens_p ndens_e Bx By Bz Bmag rho \n" ;
for (int icell = 0; icell < total_injected_cells; ++icell ) {
if (InjectedCellDiagData[icell*total_diags + 6] == 1) {
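The .cpp changes above all follow the same pattern; a condensed, hypothetical example (the MultiFab and the function name are stand-ins, not WarpX code) of the fully qualified MFIter/TilingIfNotGPU loop the diff converts to:

#include <AMReX_MultiFab.H>
#include <AMReX_GpuLaunch.H>

using namespace amrex::literals;

void zero_out (amrex::MultiFab& mf)
{
    // Every AMReX name is spelled with its explicit amrex:: prefix now that the
    // blanket "using namespace amrex;" has been removed from PulsarParameters.H.
    for (amrex::MFIter mfi(mf, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
    {
        const amrex::Box& bx = mfi.tilebox();
        amrex::Array4<amrex::Real> const& arr = mf.array(mfi);
        amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k)
        {
            arr(i,j,k) = 0.0_rt;
        });
    }
}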
5 changes: 5 additions & 0 deletions cmake/WarpXFunctions.cmake
@@ -220,6 +220,10 @@ function(set_warpx_binary_name)
set_property(TARGET ${tgt} APPEND_STRING PROPERTY OUTPUT_NAME ".PSATD")
endif()

if(PULSAR)
set_property(TARGET ${tgt} APPEND_STRING PROPERTY OUTPUT_NAME ".PULSAR")
endif()

if(WarpX_EB)
set_property(TARGET ${tgt} APPEND_STRING PROPERTY OUTPUT_NAME ".EB")
endif()
@@ -373,6 +377,7 @@ function(warpx_print_summary)
message(" MPI (thread multiple): ${WarpX_MPI_THREAD_MULTIPLE}")
endif()
message(" PSATD: ${WarpX_PSATD}")
message(" PULSAR: ${PULSAR}")
message(" PRECISION: ${WarpX_PRECISION}")
message(" OPENPMD: ${WarpX_OPENPMD}")
message(" QED: ${WarpX_QED}")

