diff --git a/Docs/source/refs.bib b/Docs/source/refs.bib
index d6c81c34404..a9def19c995 100644
--- a/Docs/source/refs.bib
+++ b/Docs/source/refs.bib
@@ -507,3 +507,13 @@ @article{Rhee1987
 url = {https://doi.org/10.1063/1.1139314},
 eprint = {https://pubs.aip.org/aip/rsi/article-pdf/58/2/240/19154912/240\_1\_online.pdf},
 }
+
+@misc{holmstrom2013handlingvacuumregionshybrid,
+ title={Handling vacuum regions in a hybrid plasma solver},
+ author={M. Holmstrom},
+ year={2013},
+ eprint={1301.0272},
+ archivePrefix={arXiv},
+ primaryClass={physics.space-ph},
+ url={https://arxiv.org/abs/1301.0272},
+}
diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst
index aaba7130b87..2bff856b4b7 100644
--- a/Docs/source/usage/parameters.rst
+++ b/Docs/source/usage/parameters.rst
@@ -2535,6 +2535,27 @@ Maxwell solver: kinetic-fluid hybrid
 * ``hybrid_pic_model.substeps`` (`int`) optional (default ``10``)
 If ``algo.maxwell_solver`` is set to ``hybrid``, this sets the number of sub-steps to take during the B-field update.
+* ``hybrid_pic_model.holmstrom_vacuum_region`` (`bool`) optional (default ``false``)
+ If ``algo.maxwell_solver`` is set to ``hybrid``, this enables the vacuum-region handling of the generalized Ohm's law described in :cite:t:`param-holmstrom2013handlingvacuumregionshybrid`, which suppresses field fluctuations in vacuum regions.
+
+* ``hybrid_pic_model.add_external_fields`` (`bool`) optional (default ``false``)
+ If ``algo.maxwell_solver`` is set to ``hybrid``, this sets the hybrid solver to use split external fields, defined by the ``external_vector_potential`` inputs below.
+
+* ``external_vector_potential.fields`` (list of `str`) optional (default ``empty``)
+ If ``hybrid_pic_model.add_external_fields`` is set to ``true``, this is a list of names of external, time-varying vector potentials to be added to the hybrid solver (see the example below).
+
+* ``external_vector_potential.<field name>.read_from_file`` (`bool`) optional (default ``false``)
+ If ``hybrid_pic_model.add_external_fields`` is set to ``true``, this flag determines whether the external field is loaded from an openPMD file or evaluated from an analytic function of space.
+
+* ``external_vector_potential.<field name>.path`` (`str`) optional (default ``""``)
+ If ``external_vector_potential.<field name>.read_from_file`` is set to ``true``, this sets the path to the openPMD file from which the external vector potential is loaded, in units of :math:`\mathrm{Wb/m}`.
+
+* ``external_vector_potential.<field name>.A[x,y,z]_external_grid_function(x,y,z)`` (`str`) optional (default ``"0"``)
+ If ``external_vector_potential.<field name>.read_from_file`` is set to ``false``, this sets the external vector potential on the grid from an analytic function, in units of :math:`\mathrm{Wb/m}`.
+
+* ``external_vector_potential.<field name>.A_time_external_function(t)`` (`str`) optional (default ``"1"``)
+ This sets the relative strength of the external vector potential through a dimensionless, analytic function of time; the external B and E fields are computed from the value and the first time derivative of this function, respectively.
+
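+ For example, a single ramped external field could be specified as follows (a sketch: the field name ``coil``, the expressions, and the numerical values are illustrative placeholders):
+
+ .. code-block:: text
+
+    hybrid_pic_model.add_external_fields = true
+    external_vector_potential.fields = coil
+    external_vector_potential.coil.Ax_external_grid_function(x,y,z) = "-0.5*y*1e-4"
+    external_vector_potential.coil.Ay_external_grid_function(x,y,z) = "0.5*x*1e-4"
+    external_vector_potential.coil.Az_external_grid_function(x,y,z) = "0"
+    external_vector_potential.coil.A_time_external_function(t) = "t/1e-3"
+
+ Since :math:`\nabla\times\mathbf{A} = (0, 0, 10^{-4})`, this applies a uniform axial field of :math:`10^{-4}` T, ramped up linearly over 1 ms.
+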
.. note:: Based on results from :cite:t:`param-Stanier2020` it is recommended to use
diff --git a/Examples/Tests/CMakeLists.txt b/Examples/Tests/CMakeLists.txt
index 5ff1d4a9a70..b80e6158f49 100644
--- a/Examples/Tests/CMakeLists.txt
+++ b/Examples/Tests/CMakeLists.txt
@@ -41,6 +41,7 @@ add_subdirectory(nci_fdtd_stability)
 add_subdirectory(nci_psatd_stability)
 add_subdirectory(nodal_electrostatic)
 add_subdirectory(nuclear_fusion)
+add_subdirectory(ohm_solver_cylinder_compression)
 add_subdirectory(ohm_solver_em_modes)
 add_subdirectory(ohm_solver_ion_beam_instability)
 add_subdirectory(ohm_solver_ion_Landau_damping)
diff --git a/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt b/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt
new file mode 100644
index 00000000000..86596a92a87
--- /dev/null
+++ b/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt
@@ -0,0 +1,24 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_3d_ohm_solver_cylinder_compression_picmi  # name
+    3  # dims
+    2  # nprocs
+    "inputs_test_3d_ohm_solver_cylinder_compression_picmi.py --test"  # inputs
+    OFF  # analysis
+    "analysis_default_regression.py --path diags/diag1000020"  # checksum
+    OFF  # dependency
+)
+label_warpx_test(test_3d_ohm_solver_cylinder_compression_picmi slow)
+
+add_warpx_test(
+    test_rz_ohm_solver_cylinder_compression_picmi  # name
+    RZ  # dims
+    2  # nprocs
+    "inputs_test_rz_ohm_solver_cylinder_compression_picmi.py --test"  # inputs
+    OFF  # analysis
+    "analysis_default_regression.py --path diags/diag1000020"  # checksum
+    OFF  # dependency
+)
+label_warpx_test(test_rz_ohm_solver_cylinder_compression_picmi slow)
diff --git a/Examples/Tests/ohm_solver_cylinder_compression/analysis_default_regression.py b/Examples/Tests/ohm_solver_cylinder_compression/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/ohm_solver_cylinder_compression/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py
new file mode 100644
index 00000000000..4d0ab4b2474
--- /dev/null
+++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py
@@ -0,0 +1,393 @@
+#!/usr/bin/env python3
+#
+# --- Test script for the kinetic-fluid hybrid model in WarpX wherein ions are
+# --- treated as kinetic particles and electrons as an isothermal, inertialess
+# --- background fluid. The script demonstrates the use of this model to
+# --- simulate adiabatic compression of a plasma cylinder initialized from an
+# --- analytical Grad-Shafranov solution.
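+# ---
+# --- The applied compression field enters through a vector potential: for a
+# --- uniform axial field increment dB, A = 0.5*dB*(-y, x, 0) satisfies
+# --- curl(A) = (0, 0, dB), which is the uniform-compression dataset written
+# --- to Afield.h5 in this script.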
+ +import argparse +import shutil +import sys +from pathlib import Path + +import numpy as np +import openpmd_api as io +from mpi4py import MPI as mpi + +from pywarpx import fields, picmi + +constants = picmi.constants + +comm = mpi.COMM_WORLD + +simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=False) + + +class PlasmaCylinderCompression(object): + # B0 is chosen with all other quantities scaled by it + n0 = 1e20 + T_i = 10 # eV + T_e = 0 + p0 = n0 * constants.q_e * T_i + + B0 = np.sqrt(2 * constants.mu0 * p0) # Initial magnetic field strength (T) + + # Do a 2x uniform B-field compression + dB = B0 + + # Flux Conserver radius + R_c = 0.5 + + # Plasma Radius (These values match GS solution in gs_psi.csv) + R_p = 0.25 + delta_p = 0.025 + + # Domain parameters + LX = 2.0 * R_c * 1.05 # m + LY = 2.0 * R_c * 1.05 + LZ = 0.5 # m + + LT = 10 # ion cyclotron periods + DT = 1e-3 # ion cyclotron periods + + # Resolution parameters + NX = 256 + NY = 256 + NZ = 128 + + # Starting number of particles per cell + NPPC = 100 + + # Number of substeps used to update B + substeps = 20 + + def Bz(self, r): + return np.sqrt( + self.B0**2 + - 2.0 + * constants.mu0 + * self.n0 + * constants.q_e + * self.T_i + / (1.0 + np.exp((r - self.R_p) / self.delta_p)) + ) + + def __init__(self, test, verbose): + self.test = test + self.verbose = verbose or self.test + + self.Lx = self.LX + self.Ly = self.LY + self.Lz = self.LZ + + self.DX = self.LX / self.NX + self.DY = self.LY / self.NY + self.DZ = self.LZ / self.NZ + + if comm.rank == 0: + # Write uniform compression dataset to OpenPMD to exercise reading openPMD data + # for the time varying external fields + xvec = np.linspace(-self.LX, self.LX, num=2 * self.NX) + yvec = np.linspace(-self.LY, self.LY, num=2 * self.NY) + zvec = np.linspace(-self.LZ, self.LZ, num=2 * self.NZ) + XM, YM, ZM = np.meshgrid(xvec, yvec, zvec, indexing="ij") + + RM = np.sqrt(XM**2 + YM**2) + + Ax_data = -0.5 * YM * self.dB + Ay_data = 0.5 * XM * self.dB + Az_data = np.zeros_like(RM) + + # Write vector potential to file to exercise field loading via OPenPMD + series = io.Series("Afield.h5", io.Access.create) + + it = series.iterations[0] + + A = it.meshes["A"] + A.grid_spacing = [self.DX, self.DY, self.DZ] + A.grid_global_offset = [-self.LX, -self.LY, -self.LZ] + A.grid_unit_SI = 1.0 + A.axis_labels = ["x", "y", "z"] + A.data_order = "C" + A.unit_dimension = { + io.Unit_Dimension.M: 1.0, + io.Unit_Dimension.T: -2.0, + io.Unit_Dimension.I: -1.0, + io.Unit_Dimension.L: -1.0, + } + + Ax = A["x"] + Ay = A["y"] + Az = A["z"] + + Ax.position = [0.0, 0.0] + Ay.position = [0.0, 0.0] + Az.position = [0.0, 0.0] + + Ax_dataset = io.Dataset(Ax_data.dtype, Ax_data.shape) + + Ay_dataset = io.Dataset(Ay_data.dtype, Ay_data.shape) + + Az_dataset = io.Dataset(Az_data.dtype, Az_data.shape) + + Ax.reset_dataset(Ax_dataset) + Ay.reset_dataset(Ay_dataset) + Az.reset_dataset(Az_dataset) + + Ax.store_chunk(Ax_data) + Ay.store_chunk(Ay_data) + Az.store_chunk(Az_data) + + series.flush() + series.close() + + comm.Barrier() + + # calculate various plasma parameters based on the simulation input + self.get_plasma_quantities() + + self.dt = self.DT * self.t_ci + + # run very low resolution as a CI test + if self.test: + self.total_steps = 20 + self.diag_steps = self.total_steps // 5 + self.NX = 64 + self.NY = 64 + self.NZ = 32 + else: + self.total_steps = int(self.LT / self.DT) + self.diag_steps = 100 + + # print out plasma parameters + if comm.rank == 0: + print( + f"Initializing simulation 
with input parameters:\n" + f"\tTi = {self.T_i:.1f} eV\n" + f"\tn0 = {self.n0:.1e} m^-3\n" + f"\tB0 = {self.B0:.2f} T\n", + f"\tDX/DY = {self.DX / self.l_i:.3f} c/w_pi\n" + f"\tDZ = {self.DZ / self.l_i:.3f} c/w_pi\n", + ) + print( + f"Plasma parameters:\n" + f"\tl_i = {self.l_i:.1e} m\n" + f"\tt_ci = {self.t_ci:.1e} s\n" + f"\tv_ti = {self.vi_th:.1e} m/s\n" + f"\tvA = {self.vA:.1e} m/s\n" + ) + print( + f"Numerical parameters:\n" + f"\tdz = {self.Lz / self.NZ:.1e} m\n" + f"\tdt = {self.dt:.1e} s\n" + f"\tdiag steps = {self.diag_steps:d}\n" + f"\ttotal steps = {self.total_steps:d}\n" + ) + + self.setup_run() + + def get_plasma_quantities(self): + """Calculate various plasma parameters based on the simulation input.""" + + # Ion mass (kg) + self.M = constants.m_p + + # Cyclotron angular frequency (rad/s) and period (s) + self.w_ci = constants.q_e * abs(self.B0) / self.M + self.t_ci = 2.0 * np.pi / self.w_ci + + # Ion plasma frequency (Hz) + self.w_pi = np.sqrt(constants.q_e**2 * self.n0 / (self.M * constants.ep0)) + + # Ion skin depth (m) + self.l_i = constants.c / self.w_pi + + # # Alfven speed (m/s): vA = B / sqrt(mu0 * n * (M + m)) = c * omega_ci / w_pi + self.vA = abs(self.B0) / np.sqrt( + constants.mu0 * self.n0 * (constants.m_e + self.M) + ) + + # calculate thermal speeds + self.vi_th = np.sqrt(self.T_i * constants.q_e / self.M) + + # Ion Larmor radius (m) + self.rho_i = self.vi_th / self.w_ci + + def load_fields(self): + Bx = fields.BxFPExternalWrapper(include_ghosts=False) + By = fields.ByFPExternalWrapper(include_ghosts=False) + Bz = fields.BzFPExternalWrapper(include_ghosts=False) + + Bx[:, :] = 0.0 + By[:, :] = 0.0 + + XM, YM, ZM = np.meshgrid( + Bz.mesh("x"), Bz.mesh("y"), Bz.mesh("z"), indexing="ij" + ) + + RM = np.sqrt(XM**2 + YM**2) + + Bz[:, :] = self.Bz(RM) + comm.Barrier() + + def setup_run(self): + """Setup simulation components.""" + + ####################################################################### + # Set geometry and boundary conditions # + ####################################################################### + + # Create grid + self.grid = picmi.Cartesian3DGrid( + number_of_cells=[self.NX, self.NY, self.NZ], + lower_bound=[-0.5 * self.Lx, -0.5 * self.Ly, -0.5 * self.Lz], + upper_bound=[0.5 * self.Lx, 0.5 * self.Ly, 0.5 * self.Lz], + lower_boundary_conditions=["dirichlet", "dirichlet", "periodic"], + upper_boundary_conditions=["dirichlet", "dirichlet", "periodic"], + lower_boundary_conditions_particles=["absorbing", "absorbing", "periodic"], + upper_boundary_conditions_particles=["absorbing", "absorbing", "periodic"], + warpx_max_grid_size=self.NZ, + ) + simulation.time_step_size = self.dt + simulation.max_steps = self.total_steps + simulation.current_deposition_algo = "direct" + simulation.particle_shape = 1 + simulation.use_filter = True + simulation.verbose = self.verbose + + ####################################################################### + # Field solver and external field # + ####################################################################### + # External Field definition. 
Sigmoid starting around 2.5 us + A_ext = { + "uniform": { + "read_from_file": True, + "path": "Afield.h5", + "A_time_external_function": "1/(1+exp(5*(1-(t-t0_ramp)*sqrt(2)/tau_ramp)))", + } + } + + self.solver = picmi.HybridPICSolver( + grid=self.grid, + gamma=1.0, + Te=self.T_e, + n0=self.n0, + n_floor=0.05 * self.n0, + plasma_resistivity="if(rho<=rho_floor,eta_v,eta_p)", + plasma_hyper_resistivity=1e-8, + substeps=self.substeps, + A_external=A_ext, + tau_ramp=20e-6, + t0_ramp=5e-6, + rho_floor=0.05 * self.n0 * constants.q_e, + eta_p=1e-8, + eta_v=1e-3, + ) + simulation.solver = self.solver + + simulation.embedded_boundary = picmi.EmbeddedBoundary( + implicit_function="(x**2+y**2-R_w**2)", R_w=self.R_c + ) + + # Add field loader callback + B_ext = picmi.LoadInitialFieldFromPython( + load_from_python=self.load_fields, + warpx_do_divb_cleaning_external=True, + load_B=True, + load_E=False, + ) + simulation.add_applied_field(B_ext) + + ####################################################################### + # Particle types setup # + ####################################################################### + r_omega = "(sqrt(x*x+y*y)*q_e*B0/m_p)" + dlnndr = "((-1/delta_p)/(1+exp(-(sqrt(x*x+y*y)-R_p)/delta_p)))" + vth = f"0.5*(-{r_omega}+sqrt({r_omega}*{r_omega}+4*q_e*T_i*{dlnndr}/m_p))" + + momentum_expr = [f"y*{vth}", f"-x*{vth}", "0"] + + self.ions = picmi.Species( + name="ions", + charge="q_e", + mass=self.M, + initial_distribution=picmi.AnalyticDistribution( + density_expression="n0_p/(1+exp((sqrt(x*x+y*y)-R_p)/delta_p))", + momentum_expressions=momentum_expr, + warpx_momentum_spread_expressions=[f"{str(self.vi_th)}"] * 3, + warpx_density_min=0.01 * self.n0, + R_p=self.R_p, + delta_p=self.delta_p, + n0_p=self.n0, + B0=self.B0, + T_i=self.T_i, + ), + ) + simulation.add_species( + self.ions, + layout=picmi.PseudoRandomLayout( + grid=self.grid, n_macroparticles_per_cell=self.NPPC + ), + ) + + ####################################################################### + # Add diagnostics # + ####################################################################### + + if self.test: + particle_diag = picmi.ParticleDiagnostic( + name="diag1", + period=self.diag_steps, + species=[self.ions], + data_list=["ux", "uy", "uz", "x", "z", "weighting"], + write_dir="diags", + warpx_format="plotfile", + ) + simulation.add_diagnostic(particle_diag) + field_diag = picmi.FieldDiagnostic( + name="diag1", + grid=self.grid, + period=self.diag_steps, + data_list=["B", "E", "rho"], + write_dir="diags", + warpx_format="plotfile", + ) + simulation.add_diagnostic(field_diag) + + ####################################################################### + # Initialize # + ####################################################################### + + if comm.rank == 0: + if Path.exists(Path("diags")): + shutil.rmtree("diags") + Path("diags").mkdir(parents=True, exist_ok=True) + + # Initialize inputs and WarpX instance + simulation.initialize_inputs() + simulation.initialize_warpx() + + +########################## +# parse input parameters +########################## + +parser = argparse.ArgumentParser() +parser.add_argument( + "-t", + "--test", + help="toggle whether this script is run as a short CI test", + action="store_true", +) +parser.add_argument( + "-v", + "--verbose", + help="Verbose output", + action="store_true", +) +args, left = parser.parse_known_args() +sys.argv = sys.argv[:1] + left + +run = PlasmaCylinderCompression(test=args.test, verbose=args.verbose) +simulation.step() diff --git 
a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py new file mode 100644 index 00000000000..8c65f88ae79 --- /dev/null +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py @@ -0,0 +1,383 @@ +#!/usr/bin/env python3 +# +# --- Test script for the kinetic-fluid hybrid model in WarpX wherein ions are +# --- treated as kinetic particles and electrons as an isothermal, inertialess +# --- background fluid. The script demonstrates the use of this model to +# --- simulate adiabatic compression of a plasma cylinder initialized from an +# --- analytical Grad-Shafranov solution. + +import argparse +import shutil +import sys +from pathlib import Path + +import numpy as np +import openpmd_api as io +from mpi4py import MPI as mpi + +from pywarpx import fields, picmi + +constants = picmi.constants + +comm = mpi.COMM_WORLD + +simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=False) + + +class PlasmaCylinderCompression(object): + # B0 is chosen with all other quantities scaled by it + n0 = 1e20 + T_i = 10 # eV + T_e = 0 + p0 = n0 * constants.q_e * T_i + + B0 = np.sqrt(2 * constants.mu0 * p0) # External magnetic field strength (T) + + # Do a 2x uniform B-field compression + dB = B0 + + # Flux Conserver radius + R_c = 0.5 + + # Plasma Radius (These values control the analytical GS solution) + R_p = 0.25 + delta_p = 0.025 + + # Domain parameters + LR = R_c # m + LZ = 0.25 * R_c # m + + LT = 10 # ion cyclotron periods + DT = 1e-3 # ion cyclotron periods + + # Resolution parameters + NR = 128 + NZ = 32 + + # Starting number of particles per cell + NPPC = 100 + + # Number of substeps used to update B + substeps = 20 + + def Bz(self, r): + return np.sqrt( + self.B0**2 + - 2.0 + * constants.mu0 + * self.n0 + * constants.q_e + * self.T_i + / (1.0 + np.exp((r - self.R_p) / self.delta_p)) + ) + + def __init__(self, test, verbose): + self.test = test + self.verbose = verbose or self.test + + self.Lr = self.LR + self.Lz = self.LZ + + self.DR = self.LR / self.NR + self.DZ = self.LZ / self.NZ + + # Write A to OpenPMD for a uniform B field to exercise file based loader + if comm.rank == 0: + mvec = np.array([0]) + rvec = np.linspace(0, 2 * self.LR, num=2 * self.NR) + zvec = np.linspace(-self.LZ, self.LZ, num=2 * self.NZ) + MM, RM, ZM = np.meshgrid(mvec, rvec, zvec, indexing="ij") + + # Write uniform compression dataset to OpenPMD to exercise reading openPMD data + # for the time varying external fields + Ar_data = np.zeros_like(RM) + Az_data = np.zeros_like(RM) + + # Zero padded outside of domain + At_data = 0.5 * RM * self.dB + + # Write vector potential to file to exercise field loading via + series = io.Series("Afield.h5", io.Access.create) + + it = series.iterations[0] + + A = it.meshes["A"] + A.geometry = io.Geometry.thetaMode + A.geometry_parameters = "m=0" + A.grid_spacing = [self.DR, self.DZ] + A.grid_global_offset = [0.0, -self.LZ] + A.grid_unit_SI = 1.0 + A.axis_labels = ["r", "z"] + A.data_order = "C" + A.unit_dimension = { + io.Unit_Dimension.M: 1.0, + io.Unit_Dimension.T: -2.0, + io.Unit_Dimension.I: -1.0, + io.Unit_Dimension.L: -1.0, + } + + Ar = A["r"] + At = A["t"] + Az = A["z"] + + Ar.position = [0.0, 0.0] + At.position = [0.0, 0.0] + Az.position = [0.0, 0.0] + + Ar_dataset = io.Dataset(Ar_data.dtype, Ar_data.shape) + + At_dataset = io.Dataset(At_data.dtype, 
At_data.shape) + + Az_dataset = io.Dataset(Az_data.dtype, Az_data.shape) + + Ar.reset_dataset(Ar_dataset) + At.reset_dataset(At_dataset) + Az.reset_dataset(Az_dataset) + + Ar.store_chunk(Ar_data) + At.store_chunk(At_data) + Az.store_chunk(Az_data) + + series.flush() + series.close() + + comm.Barrier() + + # calculate various plasma parameters based on the simulation input + self.get_plasma_quantities() + + self.dt = self.DT * self.t_ci + + # run very low resolution as a CI test + if self.test: + self.total_steps = 20 + self.diag_steps = self.total_steps // 5 + self.NR = 64 + self.NZ = 16 + else: + self.total_steps = int(self.LT / self.DT) + self.diag_steps = 100 + + # print out plasma parameters + if comm.rank == 0: + print( + f"Initializing simulation with input parameters:\n" + f"\tTi = {self.T_i:.1f} eV\n" + f"\tn0 = {self.n0:.1e} m^-3\n" + f"\tB0 = {self.B0:.2f} T\n", + f"\tDR = {self.DR / self.l_i:.3f} c/w_pi\n" + f"\tDZ = {self.DZ / self.l_i:.3f} c/w_pi\n", + ) + print( + f"Plasma parameters:\n" + f"\tl_i = {self.l_i:.1e} m\n" + f"\tt_ci = {self.t_ci:.1e} s\n" + f"\tv_ti = {self.vi_th:.1e} m/s\n" + f"\tvA = {self.vA:.1e} m/s\n" + ) + print( + f"Numerical parameters:\n" + f"\tdz = {self.Lz / self.NZ:.1e} m\n" + f"\tdt = {self.dt:.1e} s\n" + f"\tdiag steps = {self.diag_steps:d}\n" + f"\ttotal steps = {self.total_steps:d}\n" + ) + + self.setup_run() + + def get_plasma_quantities(self): + """Calculate various plasma parameters based on the simulation input.""" + + # Ion mass (kg) + self.M = constants.m_p + + # Cyclotron angular frequency (rad/s) and period (s) + self.w_ci = constants.q_e * abs(self.B0) / self.M + self.t_ci = 2.0 * np.pi / self.w_ci + + # Ion plasma frequency (Hz) + self.w_pi = np.sqrt(constants.q_e**2 * self.n0 / (self.M * constants.ep0)) + + # Ion skin depth (m) + self.l_i = constants.c / self.w_pi + + # # Alfven speed (m/s): vA = B / sqrt(mu0 * n * (M + m)) = c * omega_ci / w_pi + self.vA = abs(self.B0) / np.sqrt( + constants.mu0 * self.n0 * (constants.m_e + self.M) + ) + + # calculate thermal speeds + self.vi_th = np.sqrt(self.T_i * constants.q_e / self.M) + + # Ion Larmor radius (m) + self.rho_i = self.vi_th / self.w_ci + + def load_fields(self): + Br = fields.BxFPExternalWrapper(include_ghosts=False) + Bt = fields.ByFPExternalWrapper(include_ghosts=False) + Bz = fields.BzFPExternalWrapper(include_ghosts=False) + + Br[:, :] = 0.0 + Bt[:, :] = 0.0 + + RM, ZM = np.meshgrid(Bz.mesh("r"), Bz.mesh("z"), indexing="ij") + + Bz[:, :] = self.Bz(RM) * (RM <= self.R_c) + comm.Barrier() + + def setup_run(self): + """Setup simulation components.""" + + ####################################################################### + # Set geometry and boundary conditions # + ####################################################################### + + # Create grid + self.grid = picmi.CylindricalGrid( + number_of_cells=[self.NR, self.NZ], + lower_bound=[0.0, -self.Lz / 2.0], + upper_bound=[self.Lr, self.Lz / 2.0], + lower_boundary_conditions=["none", "periodic"], + upper_boundary_conditions=["dirichlet", "periodic"], + lower_boundary_conditions_particles=["none", "periodic"], + upper_boundary_conditions_particles=["absorbing", "periodic"], + warpx_max_grid_size=self.NZ, + ) + simulation.time_step_size = self.dt + simulation.max_steps = self.total_steps + simulation.current_deposition_algo = "direct" + simulation.particle_shape = 1 + simulation.use_filter = True + simulation.verbose = self.verbose + + ####################################################################### + # Field solver 
and external field # + ####################################################################### + # External Field definition. Sigmoid starting around 2.5 us + A_ext = { + "uniform": { + "read_from_file": True, + "path": "Afield.h5", + "A_time_external_function": "1/(1+exp(5*(1-(t-t0_ramp)*sqrt(2)/tau_ramp)))", + } + } + + self.solver = picmi.HybridPICSolver( + grid=self.grid, + gamma=1.0, + Te=self.T_e, + n0=self.n0, + n_floor=0.05 * self.n0, + plasma_resistivity="if(rho<=rho_floor,eta_v,eta_p)", + plasma_hyper_resistivity=1e-8, + substeps=self.substeps, + A_external=A_ext, + tau_ramp=20e-6, + t0_ramp=5e-6, + rho_floor=0.05 * self.n0 * constants.q_e, + eta_p=1e-8, + eta_v=1e-3, + ) + simulation.solver = self.solver + + # Add field loader callback + B_ext = picmi.LoadInitialFieldFromPython( + load_from_python=self.load_fields, + warpx_do_divb_cleaning_external=True, + load_B=True, + load_E=False, + ) + simulation.add_applied_field(B_ext) + + ####################################################################### + # Particle types setup # + ####################################################################### + r_omega = "(sqrt(x*x+y*y)*q_e*B0/m_p)" + dlnndr = "((-1/delta_p)/(1+exp(-(sqrt(x*x+y*y)-R_p)/delta_p)))" + vth = f"0.5*(-{r_omega}+sqrt({r_omega}*{r_omega}+4*q_e*T_i*{dlnndr}/m_p))" + + momentum_expr = [f"y*{vth}", f"-x*{vth}", "0"] + + self.ions = picmi.Species( + name="ions", + charge="q_e", + mass=self.M, + initial_distribution=picmi.AnalyticDistribution( + density_expression="n0_p/(1+exp((sqrt(x*x+y*y)-R_p)/delta_p))", + momentum_expressions=momentum_expr, + warpx_momentum_spread_expressions=[f"{str(self.vi_th)}"] * 3, + warpx_density_min=0.01 * self.n0, + R_p=self.R_p, + delta_p=self.delta_p, + n0_p=self.n0, + B0=self.B0, + T_i=self.T_i, + ), + ) + simulation.add_species( + self.ions, + layout=picmi.PseudoRandomLayout( + grid=self.grid, n_macroparticles_per_cell=self.NPPC + ), + ) + + ####################################################################### + # Add diagnostics # + ####################################################################### + + if self.test: + particle_diag = picmi.ParticleDiagnostic( + name="diag1", + period=self.diag_steps, + species=[self.ions], + data_list=["ux", "uy", "uz", "x", "z", "weighting"], + write_dir="diags", + warpx_format="plotfile", + ) + simulation.add_diagnostic(particle_diag) + field_diag = picmi.FieldDiagnostic( + name="diag1", + grid=self.grid, + period=self.diag_steps, + data_list=["B", "E", "rho"], + write_dir="diags", + warpx_format="plotfile", + ) + simulation.add_diagnostic(field_diag) + + ####################################################################### + # Initialize # + ####################################################################### + + if comm.rank == 0: + if Path.exists(Path("diags")): + shutil.rmtree("diags") + Path("diags").mkdir(parents=True, exist_ok=True) + + # Initialize inputs and WarpX instance + simulation.initialize_inputs() + simulation.initialize_warpx() + + +########################## +# parse input parameters +########################## + +parser = argparse.ArgumentParser() +parser.add_argument( + "-t", + "--test", + help="toggle whether this script is run as a short CI test", + action="store_true", +) +parser.add_argument( + "-v", + "--verbose", + help="Verbose output", + action="store_true", +) +args, left = parser.parse_known_args() +sys.argv = sys.argv[:1] + left + +run = PlasmaCylinderCompression(test=args.test, verbose=args.verbose) +simulation.step() diff --git 
a/Python/pywarpx/HybridPICModel.py b/Python/pywarpx/HybridPICModel.py index 7bd8c961950..f94f44ce931 100644 --- a/Python/pywarpx/HybridPICModel.py +++ b/Python/pywarpx/HybridPICModel.py @@ -9,3 +9,4 @@ from .Bucket import Bucket hybridpicmodel = Bucket("hybrid_pic_model") +external_vector_potential = Bucket("external_vector_potential") diff --git a/Python/pywarpx/WarpX.py b/Python/pywarpx/WarpX.py index 9ef7019cda9..9b0446bcc79 100644 --- a/Python/pywarpx/WarpX.py +++ b/Python/pywarpx/WarpX.py @@ -20,7 +20,7 @@ from .Diagnostics import diagnostics, reduced_diagnostics from .EB2 import eb2 from .Geometry import geometry -from .HybridPICModel import hybridpicmodel +from .HybridPICModel import external_vector_potential, hybridpicmodel from .Interpolation import interpolation from .Lasers import lasers, lasers_list from .Particles import particles, particles_list @@ -46,6 +46,7 @@ def create_argv_list(self, **kw): argv += amrex.attrlist() argv += geometry.attrlist() argv += hybridpicmodel.attrlist() + argv += external_vector_potential.attrlist() argv += boundary.attrlist() argv += algo.attrlist() argv += interpolation.attrlist() diff --git a/Python/pywarpx/__init__.py b/Python/pywarpx/__init__.py index 054ca451756..b8e025342dd 100644 --- a/Python/pywarpx/__init__.py +++ b/Python/pywarpx/__init__.py @@ -33,7 +33,7 @@ from .Diagnostics import diagnostics, reduced_diagnostics # noqa from .EB2 import eb2 # noqa from .Geometry import geometry # noqa -from .HybridPICModel import hybridpicmodel # noqa +from .HybridPICModel import hybridpicmodel, external_vector_potential # noqa from .Interpolation import interpolation # noqa from .Lasers import lasers # noqa from .LoadThirdParty import load_cupy # noqa diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py index 9beef1de5c8..a81999103d9 100644 --- a/Python/pywarpx/fields.py +++ b/Python/pywarpx/fields.py @@ -578,6 +578,24 @@ def norm0(self, *args): return self.mf.norm0(*args) +def CustomNamedxWrapper(mf_name, level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name=mf_name, idir=0, level=level, include_ghosts=include_ghosts + ) + + +def CustomNamedyWrapper(mf_name, level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name=mf_name, idir=1, level=level, include_ghosts=include_ghosts + ) + + +def CustomNamedzWrapper(mf_name, level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name=mf_name, idir=2, level=level, include_ghosts=include_ghosts + ) + + def ExWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( mf_name="Efield_aux", idir=0, level=level, include_ghosts=include_ghosts @@ -704,6 +722,87 @@ def BzFPExternalWrapper(level=0, include_ghosts=False): ) +def AxHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_A_fp_external", + idir=0, + level=level, + include_ghosts=include_ghosts, + ) + + +def AyHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_A_fp_external", + idir=1, + level=level, + include_ghosts=include_ghosts, + ) + + +def AzHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_A_fp_external", + idir=2, + level=level, + include_ghosts=include_ghosts, + ) + + +def ExHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_E_fp_external", + idir=0, + level=level, + include_ghosts=include_ghosts, + ) + + +def EyHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + 
mf_name="hybrid_E_fp_external", + idir=1, + level=level, + include_ghosts=include_ghosts, + ) + + +def EzHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_E_fp_external", + idir=2, + level=level, + include_ghosts=include_ghosts, + ) + + +def BxHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_B_fp_external", + idir=0, + level=level, + include_ghosts=include_ghosts, + ) + + +def ByHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_B_fp_external", + idir=1, + level=level, + include_ghosts=include_ghosts, + ) + + +def BzHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_B_fp_external", + idir=2, + level=level, + include_ghosts=include_ghosts, + ) + + def JxFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( mf_name="current_fp", idir=0, level=level, include_ghosts=include_ghosts diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index da673671953..4c645a4ba75 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1853,8 +1853,37 @@ class HybridPICSolver(picmistandard.base._ClassWithInit): substeps: int, default=100 Number of substeps to take when updating the B-field. + holmstrom_vacuum_region: bool, default=False + Flag to determine handling of vacuum region. Setting to True will solve the simplified Generalized Ohm's Law dropping the Hall and pressure terms. + This flag is useful for suppressing vacuum region fluctuations. A large resistivity value must be used when rho <= rho_floor. + Jx/y/z_external_function: str Function of space and time specifying external (non-plasma) currents. + + A_external: dict + Function of space and time specifying external (non-plasma) vector potential fields. + It is expected that a nested dicitonary will be passed + into picmi for each field that has different timings + e.g. 
+        A_external = {
+            '<field name>': {
+                'Ax_external_function': <function of x,y,z>,
+                'Ay_external_function': <function of x,y,z>,
+                'Az_external_function': <function of x,y,z>,
+                'A_time_external_function': <function of t>
+            },
+            '<field name 2>': {...}
+        }
+
+        or, if fields are to be loaded from an openPMD file,
+        A_external = {
+            '<field name>': {
+                'read_from_file': True,
+                'path': <path to openPMD file>,
+                'A_time_external_function': <function of t>
+            },
+            '<field name 2>': {...}
+        }
     """

     def __init__(
@@ -1867,9 +1896,11 @@ def __init__(
         plasma_resistivity=None,
         plasma_hyper_resistivity=None,
         substeps=None,
+        holmstrom_vacuum_region=None,
         Jx_external_function=None,
         Jy_external_function=None,
         Jz_external_function=None,
+        A_external=None,
         **kw,
     ):
         self.grid = grid
@@ -1884,10 +1915,18 @@ def __init__(
         self.substeps = substeps
+        self.holmstrom_vacuum_region = holmstrom_vacuum_region
+
         self.Jx_external_function = Jx_external_function
         self.Jy_external_function = Jy_external_function
         self.Jz_external_function = Jz_external_function
+        self.add_external_fields = None
+        self.A_external = A_external
+
+        if A_external is not None:
+            self.add_external_fields = True
+
         # Handle keyword arguments used in expressions
         self.user_defined_kw = {}
         for k in list(kw.keys()):
@@ -1918,6 +1957,7 @@ def solver_initialize_inputs(self):
         )
         pywarpx.hybridpicmodel.plasma_hyper_resistivity = self.plasma_hyper_resistivity
         pywarpx.hybridpicmodel.substeps = self.substeps
+        pywarpx.hybridpicmodel.holmstrom_vacuum_region = self.holmstrom_vacuum_region
         pywarpx.hybridpicmodel.__setattr__(
             "Jx_external_grid_function(x,y,z,t)",
             pywarpx.my_constants.mangle_expression(
                 self.Jx_external_function, self.mangle_dict
             ),
         )
@@ -1936,6 +1976,50 @@
                 self.Jz_external_function, self.mangle_dict
             ),
         )
+        pywarpx.hybridpicmodel.add_external_fields = self.add_external_fields
+        if self.add_external_fields:
+            pywarpx.external_vector_potential.__setattr__(
+                "fields",
+                pywarpx.my_constants.mangle_expression(
+                    list(self.A_external.keys()), self.mangle_dict
+                ),
+            )
+            for field_name, field_dict in self.A_external.items():
+                if (
+                    "read_from_file" in field_dict.keys()
+                    and field_dict["read_from_file"]
+                ):
+                    pywarpx.external_vector_potential.__setattr__(
+                        f"{field_name}.read_from_file", field_dict["read_from_file"]
+                    )
+                    pywarpx.external_vector_potential.__setattr__(
+                        f"{field_name}.path", field_dict["path"]
+                    )
+                else:
+                    pywarpx.external_vector_potential.__setattr__(
+                        f"{field_name}.Ax_external_grid_function(x,y,z)",
+                        pywarpx.my_constants.mangle_expression(
+                            field_dict["Ax_external_function"], self.mangle_dict
+                        ),
+                    )
+                    pywarpx.external_vector_potential.__setattr__(
+                        f"{field_name}.Ay_external_grid_function(x,y,z)",
+                        pywarpx.my_constants.mangle_expression(
+                            field_dict["Ay_external_function"], self.mangle_dict
+                        ),
+                    )
+                    pywarpx.external_vector_potential.__setattr__(
+                        f"{field_name}.Az_external_grid_function(x,y,z)",
+                        pywarpx.my_constants.mangle_expression(
+                            field_dict["Az_external_function"], self.mangle_dict
+                        ),
+                    )
+                pywarpx.external_vector_potential.__setattr__(
+                    f"{field_name}.A_time_external_function(t)",
+                    pywarpx.my_constants.mangle_expression(
+                        field_dict["A_time_external_function"], self.mangle_dict
+                    ),
+                )


 class ElectrostaticSolver(picmistandard.PICMI_ElectrostaticSolver):
diff --git a/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json b/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json
new file mode 100644
index 00000000000..6cde3a9450e
--- /dev/null
+++ b/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json
@@ -0,0 +1,20 @@
+{
+  "lev=0": {
+    "Bx": 0.5334253070691776,
+    "By":
0.5318560243634998, + "Bz": 2252.108905639938, + "Ex": 10509838.331420777, + "Ey": 10512676.798857061, + "Ez": 8848.113963901804, + "rho": 384112.2912140536 + }, + "ions": { + "particle_momentum_x": 2.161294367543349e-16, + "particle_momentum_y": 2.161870747294985e-16, + "particle_momentum_z": 2.0513400435256855e-16, + "particle_position_x": 769864.202585846, + "particle_position_y": 769908.6569812088, + "particle_position_z": 620721.1900338201, + "particle_weight": 1.008292384042714e+19 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json new file mode 100644 index 00000000000..6fd2ca04fce --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json @@ -0,0 +1,20 @@ +{ + "lev=0": { + "Br": 0.01190012639573578, + "Bt": 0.011313481779415917, + "Bz": 11.684908684984164, + "Er": 154581.58512851578, + "Et": 4798.276941148807, + "Ez": 193.22344271401872, + "rho": 7968.182346905438 + }, + "ions": { + "particle_momentum_x": 3.1125151786241107e-18, + "particle_momentum_y": 3.119385993047207e-18, + "particle_momentum_z": 3.0289560038617916e-18, + "particle_position_x": 13628.662686419664, + "particle_position_y": 2285.6952310457755, + "particle_theta": 115055.48935725243, + "particle_weight": 2.525423582445981e+18 + } +} \ No newline at end of file diff --git a/Source/FieldSolver/FiniteDifferenceSolver/CMakeLists.txt b/Source/FieldSolver/FiniteDifferenceSolver/CMakeLists.txt index 19c2092d1a6..7539d706632 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/CMakeLists.txt +++ b/Source/FieldSolver/FiniteDifferenceSolver/CMakeLists.txt @@ -3,6 +3,7 @@ foreach(D IN LISTS WarpX_DIMS) target_sources(lib_${SD} PRIVATE ComputeDivE.cpp + ComputeCurlA.cpp EvolveB.cpp EvolveBPML.cpp EvolveE.cpp diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp new file mode 100644 index 00000000000..d71eead1f75 --- /dev/null +++ b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp @@ -0,0 +1,308 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: S. Eric Clark (Helion Energy) + * + * License: BSD-3-Clause-LBNL + */ + +#include "FiniteDifferenceSolver.H" + +#include "EmbeddedBoundary/Enabled.H" +#ifdef WARPX_DIM_RZ +# include "FiniteDifferenceAlgorithms/CylindricalYeeAlgorithm.H" +#else +# include "FiniteDifferenceAlgorithms/CartesianYeeAlgorithm.H" +#endif + +#include "Utils/TextMsg.H" +#include "WarpX.H" + +#include + +using namespace amrex; + +void FiniteDifferenceSolver::ComputeCurlA ( + ablastr::fields::VectorField& Bfield, + ablastr::fields::VectorField const& Afield, + std::array< std::unique_ptr,3> const& eb_update_B, + int lev ) +{ + // Select algorithm (The choice of algorithm is a runtime option, + // but we compile code for each algorithm, using templates) + if (m_fdtd_algo == ElectromagneticSolverAlgo::HybridPIC) { +#ifdef WARPX_DIM_RZ + ComputeCurlACylindrical ( + Bfield, Afield, eb_update_B, lev + ); + +#else + ComputeCurlACartesian ( + Bfield, Afield, eb_update_B, lev + ); + +#endif + } else { + amrex::Abort(Utils::TextMsg::Err( + "ComputeCurl: Unknown algorithm choice.")); + } +} + +// /** +// * \brief Calculate B from the curl of A +// * i.e. 
B = curl(A) output field on B field mesh staggering +// * +// * \param[out] curlField output of curl operation +// * \param[in] field input staggered field, should be on E/J/A mesh staggering +// */ +#ifdef WARPX_DIM_RZ +template +void FiniteDifferenceSolver::ComputeCurlACylindrical ( + ablastr::fields::VectorField& Bfield, + ablastr::fields::VectorField const& Afield, + std::array< std::unique_ptr,3> const& eb_update_B, + int lev +) +{ + // for the profiler + amrex::LayoutData* cost = WarpX::getCosts(lev); + + // reset Bfield + Bfield[0]->setVal(0); + Bfield[1]->setVal(0); + Bfield[2]->setVal(0); + + // Loop through the grids, and over the tiles within each grid +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for ( MFIter mfi(*Afield[0], TilingIfNotGPU()); mfi.isValid(); ++mfi ) { + if (cost && WarpX::load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::Timers) + { + amrex::Gpu::synchronize(); + } + Real wt = static_cast(amrex::second()); + + // Extract field data for this grid/tile + Array4 const& Ar = Afield[0]->const_array(mfi); + Array4 const& At = Afield[1]->const_array(mfi); + Array4 const& Az = Afield[2]->const_array(mfi); + Array4 const& Br = Bfield[0]->array(mfi); + Array4 const& Bt = Bfield[1]->array(mfi); + Array4 const& Bz = Bfield[2]->array(mfi); + + // Extract structures indicating where the fields + // should be updated, given the position of the embedded boundaries. + amrex::Array4 update_Br_arr, update_Bt_arr, update_Bz_arr; + if (EB::enabled()) { + update_Br_arr = eb_update_B[0]->array(mfi); + update_Bt_arr = eb_update_B[1]->array(mfi); + update_Bz_arr = eb_update_B[2]->array(mfi); + } + + // Extract stencil coefficients + Real const * const AMREX_RESTRICT coefs_r = m_stencil_coefs_r.dataPtr(); + int const n_coefs_r = static_cast(m_stencil_coefs_r.size()); + Real const * const AMREX_RESTRICT coefs_z = m_stencil_coefs_z.dataPtr(); + int const n_coefs_z = static_cast(m_stencil_coefs_z.size()); + + // Extract cylindrical specific parameters + Real const dr = m_dr; + int const nmodes = m_nmodes; + Real const rmin = m_rmin; + + // Extract tileboxes for which to loop over + Box const& tbr = mfi.tilebox(Bfield[0]->ixType().toIntVect()); + Box const& tbt = mfi.tilebox(Bfield[1]->ixType().toIntVect()); + Box const& tbz = mfi.tilebox(Bfield[2]->ixType().toIntVect()); + + // Calculate the B-field from the A-field + amrex::ParallelFor(tbr, tbt, tbz, + + // Br calculation + [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ + // Skip field update in the embedded boundaries + if (update_Br_arr && update_Br_arr(i, j, 0) == 0) { return; } + + Real const r = rmin + i*dr; // r on nodal point (Br is nodal in r) + if (r != 0) { // Off-axis, regular Maxwell equations + Br(i, j, 0, 0) = - T_Algo::UpwardDz(At, coefs_z, n_coefs_z, i, j, 0, 0); // Mode m=0 + for (int m=1; m(amrex::second()) - wt; + amrex::HostDevice::Atomic::Add( &(*cost)[mfi.index()], wt); + } + } +} + +#else + +template +void FiniteDifferenceSolver::ComputeCurlACartesian ( + ablastr::fields::VectorField & Bfield, + ablastr::fields::VectorField const& Afield, + std::array< std::unique_ptr,3> const& eb_update_B, + int lev +) +{ + using ablastr::fields::Direction; + + // for the profiler + amrex::LayoutData* cost = WarpX::getCosts(lev); + + // reset Bfield + Bfield[0]->setVal(0); + Bfield[1]->setVal(0); + Bfield[2]->setVal(0); + + // Loop through the grids, and over the tiles within each grid +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + 
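+    // Note: A is expected on the same (edge-centered) staggering as E/J, so
+    // the upward differences below place each component of curl(A) on the
+    // corresponding face-centered B staggering.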
for ( MFIter mfi(*Afield[0], TilingIfNotGPU()); mfi.isValid(); ++mfi ) { + if (cost && WarpX::load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::Timers) { + amrex::Gpu::synchronize(); + } + auto wt = static_cast(amrex::second()); + + // Extract field data for this grid/tile + Array4 const &Bx = Bfield[0]->array(mfi); + Array4 const &By = Bfield[1]->array(mfi); + Array4 const &Bz = Bfield[2]->array(mfi); + Array4 const &Ax = Afield[0]->const_array(mfi); + Array4 const &Ay = Afield[1]->const_array(mfi); + Array4 const &Az = Afield[2]->const_array(mfi); + + // Extract structures indicating where the fields + // should be updated, given the position of the embedded boundaries. + amrex::Array4 update_Bx_arr, update_By_arr, update_Bz_arr; + if (EB::enabled()) { + update_Bx_arr = eb_update_B[0]->array(mfi); + update_By_arr = eb_update_B[1]->array(mfi); + update_Bz_arr = eb_update_B[2]->array(mfi); + } + + // Extract stencil coefficients + Real const * const AMREX_RESTRICT coefs_x = m_stencil_coefs_x.dataPtr(); + auto const n_coefs_x = static_cast(m_stencil_coefs_x.size()); + Real const * const AMREX_RESTRICT coefs_y = m_stencil_coefs_y.dataPtr(); + auto const n_coefs_y = static_cast(m_stencil_coefs_y.size()); + Real const * const AMREX_RESTRICT coefs_z = m_stencil_coefs_z.dataPtr(); + auto const n_coefs_z = static_cast(m_stencil_coefs_z.size()); + + // Extract tileboxes for which to loop + Box const& tbx = mfi.tilebox(Bfield[0]->ixType().toIntVect()); + Box const& tby = mfi.tilebox(Bfield[1]->ixType().toIntVect()); + Box const& tbz = mfi.tilebox(Bfield[2]->ixType().toIntVect()); + + // Calculate the curl of A + amrex::ParallelFor(tbx, tby, tbz, + + // Bx calculation + [=] AMREX_GPU_DEVICE (int i, int j, int k){ + // Skip field update in the embedded boundaries + if (update_Bx_arr && update_Bx_arr(i, j, k) == 0) { return; } + + Bx(i, j, k) = ( + - T_Algo::UpwardDz(Ay, coefs_z, n_coefs_z, i, j, k) + + T_Algo::UpwardDy(Az, coefs_y, n_coefs_y, i, j, k) + ); + }, + + // By calculation + [=] AMREX_GPU_DEVICE (int i, int j, int k){ + // Skip field update in the embedded boundaries + if (update_By_arr && update_By_arr(i, j, k) == 0) { return; } + + By(i, j, k) = ( + - T_Algo::UpwardDx(Az, coefs_x, n_coefs_x, i, j, k) + + T_Algo::UpwardDz(Ax, coefs_z, n_coefs_z, i, j, k) + ); + }, + + // Bz calculation + [=] AMREX_GPU_DEVICE (int i, int j, int k){ + // Skip field update in the embedded boundaries + if (update_Bz_arr && update_Bz_arr(i, j, k) == 0) { return; } + + Bz(i, j, k) = ( + - T_Algo::UpwardDy(Ax, coefs_y, n_coefs_y, i, j, k) + + T_Algo::UpwardDx(Ay, coefs_x, n_coefs_x, i, j, k) + ); + } + ); + + if (cost && WarpX::load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::Timers) + { + amrex::Gpu::synchronize(); + wt = static_cast(amrex::second()) - wt; + amrex::HostDevice::Atomic::Add( &(*cost)[mfi.index()], wt); + } + } +} +#endif diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index 19b822e3628..bcac1bcf0db 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -1,7 +1,10 @@ -/* Copyright 2020 Remi Lehe +/* Copyright 2020-2024 The WarpX Community * * This file is part of WarpX. * + * Authors: Remi Lehe (LBNL) + * S. 
Eric Clark (Helion Energy)
+ *
 * License: BSD-3-Clause-LBNL
 */
@@ -172,10 +175,25 @@ class FiniteDifferenceSolver
 * \param[in] lev level number for the calculation
 */
 void CalculateCurrentAmpere (
-    ablastr::fields::VectorField& Jfield,
-    ablastr::fields::VectorField const& Bfield,
-    std::array< std::unique_ptr<amrex::iMultiFab>,3> const& eb_update_E,
-    int lev );
+    ablastr::fields::VectorField& Jfield,
+    ablastr::fields::VectorField const& Bfield,
+    std::array< std::unique_ptr<amrex::iMultiFab>,3> const& eb_update_E,
+    int lev );
+
+    /**
+     * \brief Calculation of the B field from the vector potential A,
+     * B = curl(A).
+     *
+     * \param[out] Bfield vector of magnetic field MultiFabs at a given level
+     * \param[in] Afield vector of vector potential MultiFabs at a given level
+     * \param[in] eb_update_B flags indicating where the B field should be updated, given the position of the embedded boundaries
+     * \param[in] lev level number for the calculation
+     */
+    void ComputeCurlA (
+        ablastr::fields::VectorField& Bfield,
+        ablastr::fields::VectorField const& Afield,
+        std::array< std::unique_ptr<amrex::iMultiFab>,3> const& eb_update_B,
+        int lev );

 private:
@@ -255,6 +273,14 @@ class FiniteDifferenceSolver
         int lev );

+    template< typename T_Algo >
+    void ComputeCurlACylindrical (
+        ablastr::fields::VectorField& Bfield,
+        ablastr::fields::VectorField const& Afield,
+        std::array< std::unique_ptr<amrex::iMultiFab>,3> const& eb_update_B,
+        int lev
+    );
+
 #else
     template< typename T_Algo >
     void EvolveBCartesian (
@@ -358,6 +384,14 @@ class FiniteDifferenceSolver
         std::array< std::unique_ptr<amrex::iMultiFab>,3> const& eb_update_E,
         int lev );
+
+    template< typename T_Algo >
+    void ComputeCurlACartesian (
+        ablastr::fields::VectorField & Bfield,
+        ablastr::fields::VectorField const& Afield,
+        std::array< std::unique_ptr<amrex::iMultiFab>,3> const& eb_update_B,
+        int lev
+    );
 #endif
 };
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/CMakeLists.txt b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/CMakeLists.txt
index 1367578b0aa..bb29baefcb9 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/CMakeLists.txt
+++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/CMakeLists.txt
@@ -3,5 +3,6 @@ foreach(D IN LISTS WarpX_DIMS)
     target_sources(lib_${SD}
       PRIVATE
         HybridPICModel.cpp
+        ExternalVectorPotential.cpp
     )
 endforeach()
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H
new file mode 100644
index 00000000000..71be73d5693
--- /dev/null
+++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H
@@ -0,0 +1,99 @@
+/* Copyright 2024 The WarpX Community
+ *
+ * This file is part of WarpX.
+ *
+ * Authors: S. Eric Clark (Helion Energy)
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+
+#ifndef WARPX_EXTERNAL_VECTOR_POTENTIAL_H_
+#define WARPX_EXTERNAL_VECTOR_POTENTIAL_H_
+
+#include "Fields.H"
+
+#include "Utils/WarpXAlgorithmSelection.H"
+
+#include "EmbeddedBoundary/Enabled.H"
+#include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H"
+#include "Utils/Parser/ParserUtils.H"
+#include "Utils/WarpXConst.H"
+#include "Utils/WarpXProfilerWrapper.H"
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+/**
+ * \brief This class contains the parameters needed to evaluate a
+ * time varying external vector potential, leading to external E/B
+ * fields to be applied in the hybrid solver. This class is used to break up
+ * the passed-in fields into spatial and time-dependent parts.
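+ * Each named field is stored as a static spatial part A0(x) scaled by a
+ * scalar time profile f(t), so that B_ext = f(t) * curl(A0) and
+ * E_ext = -(df/dt) * A0.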
+ *
+ * Eventually this can be used in a list to control independent external
+ * fields with different time profiles.
+ *
+ */
+class ExternalVectorPotential
+{
+protected:
+    int m_nFields;
+
+    std::vector<std::string> m_field_names;
+
+    std::vector<std::string> m_Ax_ext_grid_function;
+    std::vector<std::string> m_Ay_ext_grid_function;
+    std::vector<std::string> m_Az_ext_grid_function;
+    std::vector<std::array<std::unique_ptr<amrex::Parser>, 3>> m_A_external_parser;
+    std::vector<std::array<amrex::ParserExecutor<4>, 3>> m_A_external;
+
+    std::vector<std::string> m_A_ext_time_function;
+    std::vector<std::unique_ptr<amrex::Parser>> m_A_external_time_parser;
+    std::vector<amrex::ParserExecutor<1>> m_A_time_scale;
+
+    std::vector<bool> m_read_A_from_file;
+    std::vector<std::string> m_external_file_path;
+
+public:
+
+    // Default Constructor
+    ExternalVectorPotential ();
+
+    void ReadParameters ();
+
+    void AllocateLevelMFs (
+        ablastr::fields::MultiFabRegister & fields,
+        int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm,
+        int ncomps,
+        const amrex::IntVect& ngEB,
+        const amrex::IntVect& Ex_nodal_flag,
+        const amrex::IntVect& Ey_nodal_flag,
+        const amrex::IntVect& Ez_nodal_flag,
+        const amrex::IntVect& Bx_nodal_flag,
+        const amrex::IntVect& By_nodal_flag,
+        const amrex::IntVect& Bz_nodal_flag
+    );
+
+    void InitData ();
+
+    void CalculateExternalCurlA ();
+    void CalculateExternalCurlA (std::string& coil_name);
+
+    AMREX_FORCE_INLINE
+    void ZeroFieldinEB (
+        ablastr::fields::VectorField const& Field,
+        std::array< std::unique_ptr<amrex::iMultiFab>,3> const& eb_update);
+
+    void UpdateHybridExternalFields (
+        amrex::Real t,
+        amrex::Real dt
+    );
+};
+
+#endif //WARPX_EXTERNAL_VECTOR_POTENTIAL_H_
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp
new file mode 100644
index 00000000000..f8b2e604cf1
--- /dev/null
+++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp
@@ -0,0 +1,381 @@
+/* Copyright 2024 The WarpX Community
+ *
+ * This file is part of WarpX.
+ *
+ * Authors: S. Eric Clark (Helion Energy)
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+
+#include "ExternalVectorPotential.H"
+#include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H"
+#include "Fields.H"
+#include "WarpX.H"
+
+#include
+
+using namespace amrex;
+using namespace warpx::fields;
+
+ExternalVectorPotential::ExternalVectorPotential ()
+{
+    ReadParameters();
+}
+
+void
+ExternalVectorPotential::ReadParameters ()
+{
+    const ParmParse pp_ext_A("external_vector_potential");
+
+    pp_ext_A.queryarr("fields", m_field_names);
+
+    WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!m_field_names.empty(),
+        "No external field names defined in external_vector_potential.fields");
+
+// #if defined(WARPX_DIM_RZ)
+// WARPX_ALWAYS_ASSERT_WITH_MESSAGE(false,
+// "External Time Varying Fields in the Hybrid module is currently not supported.
Coming Soon!"); +// #endif + + m_nFields = m_field_names.size(); + + // Resize vectors and set defaults + m_Ax_ext_grid_function.resize(m_nFields); + m_Ay_ext_grid_function.resize(m_nFields); + m_Az_ext_grid_function.resize(m_nFields); + for (std::string & field : m_Ax_ext_grid_function) { field = "0.0"; } + for (std::string & field : m_Ay_ext_grid_function) { field = "0.0"; } + for (std::string & field : m_Az_ext_grid_function) { field = "0.0"; } + + m_A_external_parser.resize(m_nFields); + m_A_external.resize(m_nFields); + + m_A_ext_time_function.resize(m_nFields); + for (std::string & field_time : m_A_ext_time_function) {field_time = "1.0"; } + + m_A_external_time_parser.resize(m_nFields); + m_A_time_scale.resize(m_nFields); + + m_read_A_from_file.resize(m_nFields); + m_external_file_path.resize(m_nFields); + for (std::string & file_name : m_external_file_path) { file_name = ""; } + + for (int i = 0; i < m_nFields; ++i) { + bool read_from_file = false; + utils::parser::queryWithParser(pp_ext_A, + (m_field_names[i]+".read_from_file").c_str(), read_from_file); + m_read_A_from_file[i] = read_from_file; + + if (m_read_A_from_file[i]) { + pp_ext_A.query((m_field_names[i]+".path").c_str(), m_external_file_path[i]); + } else { + pp_ext_A.query((m_field_names[i]+".Ax_external_grid_function(x,y,z)").c_str(), + m_Ax_ext_grid_function[i]); + pp_ext_A.query((m_field_names[i]+".Ay_external_grid_function(x,y,z)").c_str(), + m_Ay_ext_grid_function[i]); + pp_ext_A.query((m_field_names[i]+".Az_external_grid_function(x,y,z)").c_str(), + m_Az_ext_grid_function[i]); + } + + pp_ext_A.query((m_field_names[i]+".A_time_external_function(t)").c_str(), + m_A_ext_time_function[i]); + } +} + +void +ExternalVectorPotential::AllocateLevelMFs ( + ablastr::fields::MultiFabRegister & fields, + int lev, const BoxArray& ba, const DistributionMapping& dm, + const int ncomps, + const IntVect& ngEB, + const IntVect& Ex_nodal_flag, + const IntVect& Ey_nodal_flag, + const IntVect& Ez_nodal_flag, + const IntVect& Bx_nodal_flag, + const IntVect& By_nodal_flag, + const IntVect& Bz_nodal_flag) +{ + using ablastr::fields::Direction; + for (std::string const & field_name : m_field_names) { + const std::string Aext_field = field_name + std::string{"_Aext"}; + fields.alloc_init(Aext_field, Direction{0}, + lev, amrex::convert(ba, Ex_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(Aext_field, Direction{1}, + lev, amrex::convert(ba, Ey_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(Aext_field, Direction{2}, + lev, amrex::convert(ba, Ez_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + + const std::string curlAext_field = field_name + std::string{"_curlAext"}; + fields.alloc_init(curlAext_field, Direction{0}, + lev, amrex::convert(ba, Bx_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(curlAext_field, Direction{1}, + lev, amrex::convert(ba, By_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(curlAext_field, Direction{2}, + lev, amrex::convert(ba, Bz_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + } + fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{0}, + lev, amrex::convert(ba, Ex_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{1}, + lev, amrex::convert(ba, Ey_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{2}, + lev, amrex::convert(ba, Ez_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_B_fp_external, Direction{0}, + lev, 
amrex::convert(ba, Bx_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_B_fp_external, Direction{1}, + lev, amrex::convert(ba, By_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_B_fp_external, Direction{2}, + lev, amrex::convert(ba, Bz_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); +} + +void +ExternalVectorPotential::InitData () +{ + using ablastr::fields::Direction; + auto& warpx = WarpX::GetInstance(); + + for (int i = 0; i < m_nFields; ++i) { + + std::string Aext_field = m_field_names[i] + std::string{"_Aext"}; + + if (m_read_A_from_file[i]) { + // Read A fields from file + for (auto lev = 0; lev <= warpx.finestLevel(); ++lev) { +#if defined(WARPX_DIM_RZ) + warpx.ReadExternalFieldFromFile(m_external_file_path[i], + warpx.m_fields.get(Aext_field, Direction{0}, lev), + "A", "r"); + warpx.ReadExternalFieldFromFile(m_external_file_path[i], + warpx.m_fields.get(Aext_field, Direction{1}, lev), + "A", "t"); + warpx.ReadExternalFieldFromFile(m_external_file_path[i], + warpx.m_fields.get(Aext_field, Direction{2}, lev), + "A", "z"); +#else + warpx.ReadExternalFieldFromFile(m_external_file_path[i], + warpx.m_fields.get(Aext_field, Direction{0}, lev), + "A", "x"); + warpx.ReadExternalFieldFromFile(m_external_file_path[i], + warpx.m_fields.get(Aext_field, Direction{1}, lev), + "A", "y"); + warpx.ReadExternalFieldFromFile(m_external_file_path[i], + warpx.m_fields.get(Aext_field, Direction{2}, lev), + "A", "z"); +#endif + } + } else { + // Initialize the A fields from expression + m_A_external_parser[i][0] = std::make_unique( + utils::parser::makeParser(m_Ax_ext_grid_function[i],{"x","y","z","t"})); + m_A_external_parser[i][1] = std::make_unique( + utils::parser::makeParser(m_Ay_ext_grid_function[i],{"x","y","z","t"})); + m_A_external_parser[i][2] = std::make_unique( + utils::parser::makeParser(m_Az_ext_grid_function[i],{"x","y","z","t"})); + m_A_external[i][0] = m_A_external_parser[i][0]->compile<4>(); + m_A_external[i][1] = m_A_external_parser[i][1]->compile<4>(); + m_A_external[i][2] = m_A_external_parser[i][2]->compile<4>(); + + // check if the external current parsers depend on time + for (int idim=0; idim<3; idim++) { + const std::set A_ext_symbols = m_A_external_parser[i][idim]->symbols(); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(A_ext_symbols.count("t") == 0, + "Externally Applied Vector potential time variation must be set with A_time_external_function(t)"); + } + + // Initialize data onto grid + for (auto lev = 0; lev <= warpx.finestLevel(); ++lev) { + warpx.ComputeExternalFieldOnGridUsingParser( + Aext_field, + m_A_external[i][0], + m_A_external[i][1], + m_A_external[i][2], + lev, PatchType::fine, + warpx.GetEBUpdateEFlag(), + false); + + for (int idir = 0; idir < 3; ++idir) { + warpx.m_fields.get(Aext_field, Direction{idir}, lev)-> + FillBoundary(warpx.Geom(lev).periodicity()); + } + } + } + + amrex::Gpu::streamSynchronize(); + + CalculateExternalCurlA(m_field_names[i]); + + // Generate parser for time function + m_A_external_time_parser[i] = std::make_unique( + utils::parser::makeParser(m_A_ext_time_function[i],{"t",})); + m_A_time_scale[i] = m_A_external_time_parser[i]->compile<1>(); + + } + + UpdateHybridExternalFields(warpx.gett_new(0), warpx.getdt(0)); +} + + +void +ExternalVectorPotential::CalculateExternalCurlA () +{ + for (auto fname : m_field_names) { + CalculateExternalCurlA(fname); + } +} + +void +ExternalVectorPotential::CalculateExternalCurlA (std::string& coil_name) +{ + using ablastr::fields::Direction; + auto & warpx = 
WarpX::GetInstance(); + + // Compute the curl of A on each level and store it + std::string Aext_field = coil_name + std::string{"_Aext"}; + std::string curlAext_field = coil_name + std::string{"_curlAext"}; + + ablastr::fields::MultiLevelVectorField A_ext = + warpx.m_fields.get_mr_levels_alldirs(Aext_field, warpx.finestLevel()); + ablastr::fields::MultiLevelVectorField curlA_ext = + warpx.m_fields.get_mr_levels_alldirs(curlAext_field, warpx.finestLevel()); + + for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { + warpx.get_pointer_fdtd_solver_fp(lev)->ComputeCurlA( + curlA_ext[lev], + A_ext[lev], + warpx.GetEBUpdateBFlag()[lev], + lev); + + for (int idir = 0; idir < 3; ++idir) { + warpx.m_fields.get(curlAext_field, Direction{idir}, lev)-> + FillBoundary(warpx.Geom(lev).periodicity()); + } + } +} + +AMREX_FORCE_INLINE +void +ExternalVectorPotential::ZeroFieldinEB ( + ablastr::fields::VectorField const& Field, + std::array< std::unique_ptr<amrex::iMultiFab>,3> const& eb_update) +{ + // Loop through the grids, and over the tiles within each grid +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for ( MFIter mfi(*Field[0], TilingIfNotGPU()); mfi.isValid(); ++mfi ) { + // Extract field data for this grid/tile + Array4<Real> const& Fx = Field[0]->array(mfi); + Array4<Real> const& Fy = Field[1]->array(mfi); + Array4<Real> const& Fz = Field[2]->array(mfi); + + // Extract structures indicating where the fields + // should be updated, given the position of the embedded boundaries. + amrex::Array4<int> update_Fx_arr, update_Fy_arr, update_Fz_arr; + if (EB::enabled()) { + update_Fx_arr = eb_update[0]->array(mfi); + update_Fy_arr = eb_update[1]->array(mfi); + update_Fz_arr = eb_update[2]->array(mfi); + } + + // Extract tileboxes for which to loop + Box const& tbx = mfi.tilebox(Field[0]->ixType().toIntVect()); + Box const& tby = mfi.tilebox(Field[1]->ixType().toIntVect()); + Box const& tbz = mfi.tilebox(Field[2]->ixType().toIntVect()); + + // Loop over the cells and update the fields + amrex::ParallelFor(tbx, tby, tbz, + + [=] AMREX_GPU_DEVICE (int i, int j, int k){ + // Skip field update in the embedded boundaries + if (update_Fx_arr && update_Fx_arr(i, j, k) == 0) { Fx(i, j, k) = 0_rt; } + }, + + [=] AMREX_GPU_DEVICE (int i, int j, int k){ + if (update_Fy_arr && update_Fy_arr(i, j, k) == 0) { Fy(i, j, k) = 0_rt; } + }, + + [=] AMREX_GPU_DEVICE (int i, int j, int k){ + if (update_Fz_arr && update_Fz_arr(i, j, k) == 0) { Fz(i, j, k) = 0_rt; } + } + ); + } +} + +void +ExternalVectorPotential::UpdateHybridExternalFields (const amrex::Real t, const amrex::Real dt) +{ + using ablastr::fields::Direction; + auto& warpx = WarpX::GetInstance(); + + + ablastr::fields::MultiLevelVectorField B_ext = + warpx.m_fields.get_mr_levels_alldirs(FieldType::hybrid_B_fp_external, warpx.finestLevel()); + ablastr::fields::MultiLevelVectorField E_ext = + warpx.m_fields.get_mr_levels_alldirs(FieldType::hybrid_E_fp_external, warpx.finestLevel()); + + // Zero E and B external fields + for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { + for (int idir = 0; idir < 3; ++idir) { + B_ext[lev][Direction{idir}]->setVal(0.0_rt); + E_ext[lev][Direction{idir}]->setVal(0.0_rt); + } + } + + for (int i = 0; i < m_nFields; ++i) { + const std::string Aext_field = m_field_names[i] + std::string{"_Aext"}; + const std::string curlAext_field = m_field_names[i] + std::string{"_curlAext"}; + + // Get B-field Scaling Factor + const amrex::Real scale_factor_B = m_A_time_scale[i](t); + + // Get dA/dt scaling factor based on time centered FD around t +
const amrex::Real sf_l = m_A_time_scale[i](t-0.5_rt*dt); + const amrex::Real sf_r = m_A_time_scale[i](t+0.5_rt*dt); + const amrex::Real scale_factor_E = -(sf_r - sf_l)/dt; + + ablastr::fields::MultiLevelVectorField A_ext = + warpx.m_fields.get_mr_levels_alldirs(Aext_field, warpx.finestLevel()); + ablastr::fields::MultiLevelVectorField curlA_ext = + warpx.m_fields.get_mr_levels_alldirs(curlAext_field, warpx.finestLevel()); + + for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { + for (int idir = 0; idir < 3; ++idir) { + // Scale A_ext by - \partial A / \partial t and add to E_ext + amrex::MultiFab::LinComb( + *E_ext[lev][Direction{idir}], + 1.0_rt, *E_ext[lev][Direction{idir}], 0, + scale_factor_E, *A_ext[lev][Direction{idir}], 0, + 0, 1, 0); + + // Scale curlA_ext by the t function and add to B_ext + amrex::MultiFab::LinComb( + *B_ext[lev][Direction{idir}], + 1.0_rt, *B_ext[lev][Direction{idir}], 0, + scale_factor_B, *curlA_ext[lev][Direction{idir}], 0, + 0, 1, 0); + } + + for (int idir = 0; idir < 3; ++idir) { + E_ext[lev][Direction{idir}]->FillBoundary(warpx.Geom(lev).periodicity()); + B_ext[lev][Direction{idir}]->FillBoundary(warpx.Geom(lev).periodicity()); + } + + if (EB::enabled()) { + ZeroFieldinEB(B_ext[lev], warpx.GetEBUpdateBFlag()[lev]); + ZeroFieldinEB(E_ext[lev], warpx.GetEBUpdateEFlag()[lev]); + } + } + } + amrex::Gpu::streamSynchronize(); +} diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index 4b50c16a0c8..2a489e1c806 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -1,8 +1,9 @@ -/* Copyright 2023 The WarpX Community +/* Copyright 2023-2024 The WarpX Community * * This file is part of WarpX. * * Authors: Roelof Groenewald (TAE Technologies) + * S. Eric Clark (Helion Energy) * * License: BSD-3-Clause-LBNL */ @@ -12,6 +13,9 @@ #include "HybridPICModel_fwd.H" +#include "Fields.H" + +#include "ExternalVectorPotential.H" #include "Utils/WarpXAlgorithmSelection.H" #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H" @@ -23,6 +27,9 @@ #include #include +#include +#include +#include #include @@ -39,11 +46,26 @@ public: void ReadParameters (); /** Allocate hybrid-PIC specific multifabs. Called in constructor. 
*/ - void AllocateLevelMFs (ablastr::fields::MultiFabRegister & fields, - int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm, - int ncomps, const amrex::IntVect& ngJ, const amrex::IntVect& ngRho, - const amrex::IntVect& jx_nodal_flag, const amrex::IntVect& jy_nodal_flag, - const amrex::IntVect& jz_nodal_flag, const amrex::IntVect& rho_nodal_flag); + void AllocateLevelMFs ( + ablastr::fields::MultiFabRegister & fields, + int lev, + const amrex::BoxArray& ba, + const amrex::DistributionMapping& dm, + int ncomps, + const amrex::IntVect& ngJ, + const amrex::IntVect& ngRho, + const amrex::IntVect& ngEB, + const amrex::IntVect& jx_nodal_flag, + const amrex::IntVect& jy_nodal_flag, + const amrex::IntVect& jz_nodal_flag, + const amrex::IntVect& rho_nodal_flag, + const amrex::IntVect& Ex_nodal_flag, + const amrex::IntVect& Ey_nodal_flag, + const amrex::IntVect& Ez_nodal_flag, + const amrex::IntVect& Bx_nodal_flag, + const amrex::IntVect& By_nodal_flag, + const amrex::IntVect& Bz_nodal_flag + ) const; void InitData (); @@ -142,7 +164,7 @@ public: * charge density (and assumption of quasi-neutrality) using the user * specified electron equation of state. * - * \param[out] Pe_filed scalar electron pressure MultiFab at a given level + * \param[out] Pe_field scalar electron pressure MultiFab at a given level * \param[in] rho_field scalar ion charge density Multifab at a given level */ void FillElectronPressureMF ( @@ -153,6 +175,8 @@ public: /** Number of substeps to take when evolving B */ int m_substeps = 10; + bool m_holmstrom_vacuum_region = false; + /** Electron temperature in eV */ amrex::Real m_elec_temp; /** Reference electron density */ @@ -178,7 +202,11 @@ public: std::string m_Jz_ext_grid_function = "0.0"; std::array< std::unique_ptr<amrex::Parser>, 3> m_J_external_parser; std::array< amrex::ParserExecutor<4>, 3> m_J_external; - bool m_external_field_has_time_dependence = false; + bool m_external_current_has_time_dependence = false; + + /** External E/B fields */ + bool m_add_external_fields = false; + std::unique_ptr<ExternalVectorPotential> m_external_vector_potential; /** Gpu Vector with index type of the Jx multifab */ amrex::GpuArray<int, 3> Jx_IndexType; diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 64ee83b10e0..428e36bca08 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -1,8 +1,9 @@ -/* Copyright 2023 The WarpX Community +/* Copyright 2023-2024 The WarpX Community * * This file is part of WarpX. * * Authors: Roelof Groenewald (TAE Technologies) + * S. Eric Clark (Helion Energy) * * License: BSD-3-Clause-LBNL */ @@ -12,6 +13,8 @@ #include "EmbeddedBoundary/Enabled.H" #include "Python/callbacks.H" #include "Fields.H" +#include "Particles/MultiParticleContainer.H" +#include "ExternalVectorPotential.H" #include "WarpX.H" using namespace amrex; @@ -30,6 +33,8 @@ void HybridPICModel::ReadParameters () // of sub steps can be specified by the user (defaults to 50). utils::parser::queryWithParser(pp_hybrid, "substeps", m_substeps); + utils::parser::queryWithParser(pp_hybrid, "holmstrom_vacuum_region", m_holmstrom_vacuum_region); + // The hybrid model requires an electron temperature, reference density // and exponent to be given.
These values will be used to calculate the // electron pressure according to p = n0 * Te * (n/n0)^gamma @@ -54,15 +59,31 @@ void HybridPICModel::ReadParameters () pp_hybrid.query("Jx_external_grid_function(x,y,z,t)", m_Jx_ext_grid_function); pp_hybrid.query("Jy_external_grid_function(x,y,z,t)", m_Jy_ext_grid_function); pp_hybrid.query("Jz_external_grid_function(x,y,z,t)", m_Jz_ext_grid_function); + + // external fields + pp_hybrid.query("add_external_fields", m_add_external_fields); + + if (m_add_external_fields) { + m_external_vector_potential = std::make_unique<ExternalVectorPotential>(); + } } -void HybridPICModel::AllocateLevelMFs (ablastr::fields::MultiFabRegister & fields, - int lev, const BoxArray& ba, const DistributionMapping& dm, - const int ncomps, const IntVect& ngJ, const IntVect& ngRho, - const IntVect& jx_nodal_flag, - const IntVect& jy_nodal_flag, - const IntVect& jz_nodal_flag, - const IntVect& rho_nodal_flag) +void HybridPICModel::AllocateLevelMFs ( + ablastr::fields::MultiFabRegister & fields, + int lev, const BoxArray& ba, const DistributionMapping& dm, + const int ncomps, + const IntVect& ngJ, const IntVect& ngRho, + const IntVect& ngEB, + const IntVect& jx_nodal_flag, + const IntVect& jy_nodal_flag, + const IntVect& jz_nodal_flag, + const IntVect& rho_nodal_flag, + const IntVect& Ex_nodal_flag, + const IntVect& Ey_nodal_flag, + const IntVect& Ez_nodal_flag, + const IntVect& Bx_nodal_flag, + const IntVect& By_nodal_flag, + const IntVect& Bz_nodal_flag) const { using ablastr::fields::Direction; @@ -114,6 +135,16 @@ void HybridPICModel::AllocateLevelMFs (ablastr::fields::MultiFabRegister & field lev, amrex::convert(ba, jz_nodal_flag), dm, ncomps, IntVect(1), 0.0_rt); + if (m_add_external_fields) { + m_external_vector_potential->AllocateLevelMFs( + fields, + lev, ba, dm, + ncomps, ngEB, + Ex_nodal_flag, Ey_nodal_flag, Ez_nodal_flag, + Bx_nodal_flag, By_nodal_flag, Bz_nodal_flag + ); + } + #ifdef WARPX_DIM_RZ WARPX_ALWAYS_ASSERT_WITH_MESSAGE( (ncomps == 1), @@ -142,7 +173,7 @@ void HybridPICModel::InitData () // check if the external current parsers depend on time for (int i=0; i<3; i++) { const std::set<std::string> J_ext_symbols = m_J_external_parser[i]->symbols(); - m_external_field_has_time_dependence += J_ext_symbols.count("t"); + m_external_current_has_time_dependence += J_ext_symbols.count("t"); } auto & warpx = WarpX::GetInstance(); @@ -230,11 +261,15 @@ void HybridPICModel::InitData () lev, PatchType::fine, warpx.GetEBUpdateEFlag()); } + + if (m_add_external_fields) { + m_external_vector_potential->InitData(); + } } void HybridPICModel::GetCurrentExternal () { - if (!m_external_field_has_time_dependence) { return; } + if (!m_external_current_has_time_dependence) { return; } auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) @@ -541,6 +576,7 @@ void HybridPICModel::BfieldEvolveRK ( } } + void HybridPICModel::FieldPush ( ablastr::fields::MultiLevelVectorField const& Bfield, ablastr::fields::MultiLevelVectorField const& Efield, @@ -552,13 +588,29 @@ { auto& warpx = WarpX::GetInstance(); + amrex::Real const t_old = warpx.gett_old(0); + // Calculate J = curl x B / mu0 - J_ext CalculatePlasmaCurrent(Bfield, eb_update_E); // Calculate the E-field from Ohm's law HybridPICSolveE(Efield, Jfield, Bfield, rhofield, eb_update_E, true); warpx.FillBoundaryE(ng, nodal_sync); + warpx.ApplyEfieldBoundary(0, PatchType::fine, t_old); + // Push forward the B-field using Faraday's law - amrex::Real const t_old = warpx.gett_old(0); warpx.EvolveB(dt,
dt_type, t_old); warpx.FillBoundaryB(ng, nodal_sync); + warpx.ApplyBfieldBoundary(0, PatchType::fine, dt_type, t_old); +} + +void +WarpX::CalculateExternalCurlA() { + WARPX_PROFILE("WarpX::CalculateExternalCurlA()"); + + auto & warpx = WarpX::GetInstance(); + + // Get reference to External Field Object + auto* ext_vector = warpx.m_hybrid_pic_model->m_external_vector_potential.get(); + ext_vector->CalculateExternalCurlA(); +} diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/Make.package b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/Make.package index 8145cfcef2f..d4fa9bfc390 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/Make.package +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/Make.package @@ -1,3 +1,4 @@ CEXE_sources += HybridPICModel.cpp +CEXE_sources += ExternalVectorPotential.cpp VPATH_LOCATIONS += $(WARPX_HOME)/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 2047e87b696..b750a7e4f20 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -1,8 +1,9 @@ -/* Copyright 2023 The WarpX Community +/* Copyright 2023-2024 The WarpX Community * * This file is part of WarpX. * * Authors: Roelof Groenewald (TAE Technologies) + * S. Eric Clark (Helion Energy) * * License: BSD-3-Clause-LBNL */ @@ -22,6 +23,7 @@ #include using namespace amrex; +using warpx::fields::FieldType; void FiniteDifferenceSolver::CalculateCurrentAmpere ( ablastr::fields::VectorField & Jfield, @@ -429,6 +431,17 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( const bool include_hyper_resistivity_term = (eta_h > 0.0) && solve_for_Faraday; + const bool include_external_fields = hybrid_model->m_add_external_fields; + + const bool holmstrom_vacuum_region = hybrid_model->m_holmstrom_vacuum_region; + + auto & warpx = WarpX::GetInstance(); + ablastr::fields::VectorField Bfield_external, Efield_external; + if (include_external_fields) { + Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 + Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 + } + // Index type required for interpolating fields from their respective // staggering to the Ex, Ey, Ez locations amrex::GpuArray<int, 3> const& Er_stag = hybrid_model->Ex_IndexType; @@ -485,6 +498,13 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Array4<Real const> const& Bt = Bfield[1]->const_array(mfi); Array4<Real const> const& Bz = Bfield[2]->const_array(mfi); + Array4<Real> Br_ext, Bt_ext, Bz_ext; + if (include_external_fields) { + Br_ext = Bfield_external[0]->array(mfi); + Bt_ext = Bfield_external[1]->array(mfi); + Bz_ext = Bfield_external[2]->array(mfi); + } + // Loop over the cells and update the nodal E field amrex::ParallelFor(mfi.tilebox(), [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ @@ -499,9 +519,15 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( auto const jiz_interp = Interp(Jiz, Jz_stag, nodal, coarsen, i, j, 0, 0); // interpolate the B field to a nodal grid - auto const Br_interp = Interp(Br, Br_stag, nodal, coarsen, i, j, 0, 0); - auto const Bt_interp = Interp(Bt, Bt_stag, nodal, coarsen, i, j, 0, 0); - auto const Bz_interp = Interp(Bz, Bz_stag, nodal, coarsen, i, j, 0, 0); + auto Br_interp = Interp(Br, Br_stag, nodal, coarsen, i, j, 0, 0); + auto Bt_interp = Interp(Bt, Bt_stag,
nodal, coarsen, i, j, 0, 0); + auto Bz_interp = Interp(Bz, Bz_stag, nodal, coarsen, i, j, 0, 0); + + if (include_external_fields) { + Br_interp += Interp(Br_ext, Br_stag, nodal, coarsen, i, j, 0, 0); + Bt_interp += Interp(Bt_ext, Bt_stag, nodal, coarsen, i, j, 0, 0); + Bz_interp += Interp(Bz_ext, Bz_stag, nodal, coarsen, i, j, 0, 0); + } // calculate enE = (J - Ji) x B enE_nodal(i, j, 0, 0) = ( @@ -558,6 +584,13 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( update_Ez_arr = eb_update_E[2]->array(mfi); } + Array4<Real> Er_ext, Et_ext, Ez_ext; + if (include_external_fields) { + Er_ext = Efield_external[0]->array(mfi); + Et_ext = Efield_external[1]->array(mfi); + Ez_ext = Efield_external[2]->array(mfi); + } + // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_r = m_stencil_coefs_r.dataPtr(); int const n_coefs_r = static_cast<int>(m_stencil_coefs_r.size()); @@ -582,7 +615,8 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( if (update_Er_arr && update_Er_arr(i, j, 0) == 0) { return; } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Er_stag, coarsen, i, j, 0, 0); + const Real rho_val = Interp(rho, nodal, Er_stag, coarsen, i, j, 0, 0); + Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; @@ -594,7 +628,7 @@ ... } // safety condition since we divide by rho_val later - if (rho_val < rho_floor) { rho_val = rho_floor; } + if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } // Get the gradient of the electron pressure if the longitudinal part of // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 @@ -604,7 +638,11 @@ ... // interpolate the nodal neE values to the Yee grid auto enE_r = Interp(enE, nodal, Er_stag, coarsen, i, j, 0, 0); - Er(i, j, 0) = (enE_r - grad_Pe) / rho_val; + if (rho_val < rho_floor && holmstrom_vacuum_region) { + Er(i, j, 0) = 0._rt; + } else { + Er(i, j, 0) = (enE_r - grad_Pe) / rho_val_limited; + } // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Er(i, j, 0) += eta(rho_val, jtot_val) * Jr(i, j, 0); } @@ -617,6 +655,10 @@ ... + T_Algo::Dzz(Jr, coefs_z, n_coefs_z, i, j, 0, 0) - jr_val/(r*r); Er(i, j, 0) -= eta_h * nabla2Jr; } + + if (include_external_fields && (rho_val >= rho_floor)) { + Er(i, j, 0) -= Er_ext(i, j, 0); + } }, // Et calculation @@ -634,7 +676,8 @@ ... } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Et_stag, coarsen, i, j, 0, 0); + const Real rho_val = Interp(rho, nodal, Et_stag, coarsen, i, j, 0, 0); + Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; @@ -646,7 +689,7 @@ ... } // safety condition since we divide by rho_val later - if (rho_val < rho_floor) { rho_val = rho_floor; } + if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } // Get the gradient of the electron pressure // -> d/dt = 0 for m = 0 @@ -655,7 +698,11 @@ ... // interpolate the nodal neE values to the Yee grid auto enE_t = Interp(enE, nodal, Et_stag, coarsen, i, j, 0, 1); - Et(i, j, 0) = (enE_t - grad_Pe) /
rho_val; + if (rho_val < rho_floor && holmstrom_vacuum_region) { + Et(i, j, 0) = 0._rt; + } else { + Et(i, j, 0) = (enE_t - grad_Pe) / rho_val_limited; + } // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Et(i, j, 0) += eta(rho_val, jtot_val) * Jt(i, j, 0); } @@ -664,9 +711,13 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( const Real jt_val = Interp(Jt, Jt_stag, Et_stag, coarsen, i, j, 0, 0); auto nabla2Jt = T_Algo::Dr_rDr_over_r(Jt, r, dr, coefs_r, n_coefs_r, i, j, 0, 0) + T_Algo::Dzz(Jt, coefs_z, n_coefs_z, i, j, 0, 0) - jt_val/(r*r); Et(i, j, 0) -= eta_h * nabla2Jt; } + + if (include_external_fields && (rho_val >= rho_floor)) { + Et(i, j, 0) -= Et_ext(i, j, 0); + } }, // Ez calculation @@ -676,7 +726,8 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( if (update_Ez_arr && update_Ez_arr(i, j, 0) == 0) { return; } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, 0, 0); + const Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, 0, 0); + Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; @@ -688,7 +739,7 @@ ... // safety condition since we divide by rho_val later - if (rho_val < rho_floor) { rho_val = rho_floor; } + if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } // Get the gradient of the electron pressure if the longitudinal part of // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 @@ -698,7 +749,11 @@ ... // interpolate the nodal neE values to the Yee grid auto enE_z = Interp(enE, nodal, Ez_stag, coarsen, i, j, 0, 2); - Ez(i, j, 0) = (enE_z - grad_Pe) / rho_val; + if (rho_val < rho_floor && holmstrom_vacuum_region) { + Ez(i, j, 0) = 0._rt; + } else { + Ez(i, j, 0) = (enE_z - grad_Pe) / rho_val_limited; + } // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Ez(i, j, 0) += eta(rho_val, jtot_val) * Jz(i, j, 0); } @@ -714,6 +769,10 @@ ... Ez(i, j, 0) -= eta_h * nabla2Jz; } + + if (include_external_fields && (rho_val >= rho_floor)) { + Ez(i, j, 0) -= Ez_ext(i, j, 0); + } } ); @@ -753,6 +812,17 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( const bool include_hyper_resistivity_term = (eta_h > 0.)
&& solve_for_Faraday; + const bool include_external_fields = hybrid_model->m_add_external_fields; + + const bool holmstrom_vacuum_region = hybrid_model->m_holmstrom_vacuum_region; + + auto & warpx = WarpX::GetInstance(); + ablastr::fields::VectorField Bfield_external, Efield_external; + if (include_external_fields) { + Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 + Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 + } + // Index type required for interpolating fields from their respective // staggering to the Ex, Ey, Ez locations amrex::GpuArray<int, 3> const& Ex_stag = hybrid_model->Ex_IndexType; @@ -809,6 +879,13 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Array4<Real const> const& By = Bfield[1]->const_array(mfi); Array4<Real const> const& Bz = Bfield[2]->const_array(mfi); + Array4<Real> Bx_ext, By_ext, Bz_ext; + if (include_external_fields) { + Bx_ext = Bfield_external[0]->array(mfi); + By_ext = Bfield_external[1]->array(mfi); + Bz_ext = Bfield_external[2]->array(mfi); + } + // Loop over the cells and update the nodal E field amrex::ParallelFor(mfi.tilebox(), [=] AMREX_GPU_DEVICE (int i, int j, int k){ @@ -823,9 +900,15 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( auto const jiz_interp = Interp(Jiz, Jz_stag, nodal, coarsen, i, j, k, 0); // interpolate the B field to a nodal grid - auto const Bx_interp = Interp(Bx, Bx_stag, nodal, coarsen, i, j, k, 0); - auto const By_interp = Interp(By, By_stag, nodal, coarsen, i, j, k, 0); - auto const Bz_interp = Interp(Bz, Bz_stag, nodal, coarsen, i, j, k, 0); + auto Bx_interp = Interp(Bx, Bx_stag, nodal, coarsen, i, j, k, 0); + auto By_interp = Interp(By, By_stag, nodal, coarsen, i, j, k, 0); + auto Bz_interp = Interp(Bz, Bz_stag, nodal, coarsen, i, j, k, 0); + + if (include_external_fields) { + Bx_interp += Interp(Bx_ext, Bx_stag, nodal, coarsen, i, j, k, 0); + By_interp += Interp(By_ext, By_stag, nodal, coarsen, i, j, k, 0); + Bz_interp += Interp(Bz_ext, Bz_stag, nodal, coarsen, i, j, k, 0); + } // calculate enE = (J - Ji) x B enE_nodal(i, j, k, 0) = ( @@ -882,6 +965,13 @@ ... update_Ez_arr = eb_update_E[2]->array(mfi); } + Array4<Real> Ex_ext, Ey_ext, Ez_ext; + if (include_external_fields) { + Ex_ext = Efield_external[0]->array(mfi); + Ey_ext = Efield_external[1]->array(mfi); + Ez_ext = Efield_external[2]->array(mfi); + } + // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_x = m_stencil_coefs_x.dataPtr(); auto const n_coefs_x = static_cast<int>(m_stencil_coefs_x.size()); @@ -904,7 +994,8 @@ ... if (update_Ex_arr && update_Ex_arr(i, j, k) == 0) { return; } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Ex_stag, coarsen, i, j, k, 0); + const Real rho_val = Interp(rho, nodal, Ex_stag, coarsen, i, j, k, 0); + Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; @@ -916,7 +1007,7 @@ ... } // safety condition since we divide by rho_val later - if (rho_val < rho_floor) { rho_val = rho_floor; } + if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } // Get the gradient of the electron pressure if the longitudinal part of // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 @@ -926,7 +1017,11 @@ ... //
interpolate the nodal neE values to the Yee grid auto enE_x = Interp(enE, nodal, Ex_stag, coarsen, i, j, k, 0); - Ex(i, j, k) = (enE_x - grad_Pe) / rho_val; + if (rho_val < rho_floor && holmstrom_vacuum_region) { + Ex(i, j, k) = 0._rt; + } else { + Ex(i, j, k) = (enE_x - grad_Pe) / rho_val_limited; + } // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Ex(i, j, k) += eta(rho_val, jtot_val) * Jx(i, j, k); } @@ -937,6 +1032,10 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( + T_Algo::Dzz(Jx, coefs_z, n_coefs_z, i, j, k); Ex(i, j, k) -= eta_h * nabla2Jx; } + + if (include_external_fields && (rho_val >= rho_floor)) { + Ex(i, j, k) -= Ex_ext(i, j, k); + } }, // Ey calculation @@ -946,7 +1045,8 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( if (update_Ey_arr && update_Ey_arr(i, j, k) == 0) { return; } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Ey_stag, coarsen, i, j, k, 0); + const Real rho_val = Interp(rho, nodal, Ey_stag, coarsen, i, j, k, 0); + Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; @@ -958,7 +1058,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( } // safety condition since we divide by rho_val later - if (rho_val < rho_floor) { rho_val = rho_floor; } + if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } // Get the gradient of the electron pressure if the longitudinal part of // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 @@ -968,7 +1068,11 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // interpolate the nodal neE values to the Yee grid auto enE_y = Interp(enE, nodal, Ey_stag, coarsen, i, j, k, 1); - Ey(i, j, k) = (enE_y - grad_Pe) / rho_val; + if (rho_val < rho_floor && holmstrom_vacuum_region) { + Ey(i, j, k) = 0._rt; + } else { + Ey(i, j, k) = (enE_y - grad_Pe) / rho_val_limited; + } // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Ey(i, j, k) += eta(rho_val, jtot_val) * Jy(i, j, k); } @@ -979,6 +1083,10 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( + T_Algo::Dzz(Jy, coefs_z, n_coefs_z, i, j, k); Ey(i, j, k) -= eta_h * nabla2Jy; } + + if (include_external_fields && (rho_val >= rho_floor)) { + Ey(i, j, k) -= Ey_ext(i, j, k); + } }, // Ez calculation @@ -988,7 +1096,8 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( if (update_Ez_arr && update_Ez_arr(i, j, k) == 0) { return; } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, k, 0); + const Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, k, 0); + Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; @@ -1000,7 +1109,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( } // safety condition since we divide by rho_val later - if (rho_val < rho_floor) { rho_val = rho_floor; } + if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } // Get the gradient of the electron pressure if the longitudinal part of // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 @@ -1010,7 +1119,11 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // interpolate the nodal neE values to the Yee grid auto enE_z = Interp(enE, nodal, Ez_stag, coarsen, i, j, k, 2); - Ez(i, j, k) = (enE_z - grad_Pe) / rho_val; + if (rho_val < rho_floor && 
holmstrom_vacuum_region) { + Ez(i, j, k) = 0._rt; + } else { + Ez(i, j, k) = (enE_z - grad_Pe) / rho_val_limited; + } // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Ez(i, j, k) += eta(rho_val, jtot_val) * Jz(i, j, k); } @@ -1021,6 +1134,10 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( + T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, k); Ez(i, j, k) -= eta_h * nabla2Jz; } + + if (include_external_fields && (rho_val >= rho_floor)) { + Ez(i, j, k) -= Ez_ext(i, j, k); + } } ); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/Make.package b/Source/FieldSolver/FiniteDifferenceSolver/Make.package index b3708c411fa..bc71b9b51a2 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/Make.package +++ b/Source/FieldSolver/FiniteDifferenceSolver/Make.package @@ -5,6 +5,7 @@ CEXE_sources += EvolveF.cpp CEXE_sources += EvolveG.cpp CEXE_sources += EvolveECTRho.cpp CEXE_sources += ComputeDivE.cpp +CEXE_sources += ComputeCurlA.cpp CEXE_sources += MacroscopicEvolveE.cpp CEXE_sources += EvolveBPML.cpp CEXE_sources += EvolveEPML.cpp diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 18efba3f445..048c4b29cc0 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -1,8 +1,9 @@ -/* Copyright 2023 The WarpX Community +/* Copyright 2023-2024 The WarpX Community * * This file is part of WarpX. * * Authors: Roelof Groenewald (TAE Technologies) + * S. Eric Clark (Helion Energy) * * License: BSD-3-Clause-LBNL */ @@ -33,6 +34,31 @@ void WarpX::HybridPICEvolveFields () finest_level == 0, "Ohm's law E-solve only works with a single level."); + // Get requested number of substeps to use + const int sub_steps = m_hybrid_pic_model->m_substeps; + + // Get flag to include external fields. + const bool add_external_fields = m_hybrid_pic_model->m_add_external_fields; + + // Handle field splitting for Hybrid field push + if (add_external_fields) { + // Get the external fields + m_hybrid_pic_model->m_external_vector_potential->UpdateHybridExternalFields( + gett_old(0), + 0.5_rt*dt[0]); + + // If using split fields, subtract the external field at the old time + for (int lev = 0; lev <= finest_level; ++lev) { + for (int idim = 0; idim < 3; ++idim) { + MultiFab::Subtract( + *m_fields.get(FieldType::Bfield_fp, Direction{idim}, lev), + *m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev), + 0, 0, 1, + m_fields.get(FieldType::Bfield_fp, Direction{idim}, lev)->nGrowVect()); + } + } + } + // The particles have now been pushed to their t_{n+1} positions. // Perform charge deposition in component 0 of rho_fp at t_{n+1}. 
mypc->DepositCharge(m_fields.get_mr_levels(FieldType::rho_fp, finest_level), 0._rt); @@ -64,9 +90,6 @@ void WarpX::HybridPICEvolveFields () } } - // Get requested number of substeps to use - const int sub_steps = m_hybrid_pic_model->m_substeps; - // Get the external current m_hybrid_pic_model->GetCurrentExternal(); @@ -127,6 +150,13 @@ void WarpX::HybridPICEvolveFields () ); } + if (add_external_fields) { + // Get the external fields at E^{n+1/2} + m_hybrid_pic_model->m_external_vector_potential->UpdateHybridExternalFields( + gett_old(0) + 0.5_rt*dt[0], + 0.5_rt*dt[0]); + } + // Now push the B field from t=n+1/2 to t=n+1 using the n+1/2 quantities for (int sub_step = 0; sub_step < sub_steps; sub_step++) { @@ -160,6 +190,12 @@ void WarpX::HybridPICEvolveFields () } } + if (add_external_fields) { + m_hybrid_pic_model->m_external_vector_potential->UpdateHybridExternalFields( + gett_new(0), + 0.5_rt*dt[0]); + } + // Calculate the electron pressure at t=n+1 m_hybrid_pic_model->CalculateElectronPressure(); @@ -174,6 +210,26 @@ void WarpX::HybridPICEvolveFields () m_fields.get_mr_levels(FieldType::rho_fp, finest_level), m_eb_update_E, false); FillBoundaryE(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); + // ApplyEfieldBoundary(0, PatchType::fine); + + // Handle field splitting for Hybrid field push + if (add_external_fields) { + // If using split fields, add the external field at the new time + for (int lev = 0; lev <= finest_level; ++lev) { + for (int idim = 0; idim < 3; ++idim) { + MultiFab::Add( + *m_fields.get(FieldType::Bfield_fp, Direction{idim}, lev), + *m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev), + 0, 0, 1, + m_fields.get(FieldType::Bfield_fp, Direction{idim}, lev)->nGrowVect()); + MultiFab::Add( + *m_fields.get(FieldType::Efield_fp, Direction{idim}, lev), + *m_fields.get(FieldType::hybrid_E_fp_external, Direction{idim}, lev), + 0, 0, 1, + m_fields.get(FieldType::Efield_fp, Direction{idim}, lev)->nGrowVect()); + } + } + } // Copy the rho^{n+1} values to rho_fp_temp and the J_i^{n+1/2} values to // current_fp_temp since at the next step those values will be needed as diff --git a/Source/Fields.H b/Source/Fields.H index 77589c4675e..e47beec2880 100644 --- a/Source/Fields.H +++ b/Source/Fields.H @@ -50,19 +50,21 @@ namespace warpx::fields hybrid_current_fp_temp, /**< Used with Ohm's law solver. Stores the time interpolated/extrapolated current density */ hybrid_current_fp_plasma, /**< Used with Ohm's law solver. Stores plasma current calculated as J_plasma = curl x B / mu0 - J_ext */ hybrid_current_fp_external, /**< Used with Ohm's law solver. Stores external current */ - Efield_cp, /**< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level */ - Bfield_cp, /**< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level */ - current_cp, /**< Only used with MR. The current that is used as a source for the field solver, on the coarse patch of each level */ - rho_cp, /**< Only used with MR. The charge density that is used as a source for the field solver, on the coarse patch of each level */ - F_cp, /**< Only used with MR. Used for divE cleaning, on the coarse patch of each level */ - G_cp, /**< Only used with MR. Used for divB cleaning, on the coarse patch of each level */ - Efield_cax, /**< Only used with MR. Particles that are close to the edge of the MR patch (i.e. 
in the gather buffer) gather from this field */ - Bfield_cax, /**< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field */ - E_external_particle_field, /**< Stores external particle fields provided by the user as through an openPMD file */ - B_external_particle_field, /**< Stores external particle fields provided by the user as through an openPMD file */ - distance_to_eb, /**< Only used with embedded boundaries (EB). Stores the distance to the nearest EB */ - edge_lengths, /**< Only used with the ECT solver. Indicates the length of the cell edge that is covered by the EB, in SI units */ - face_areas, /**< Only used with the ECT solver. Indicates the area of the cell face that is covered by the EB, in SI units */ + hybrid_B_fp_external, /**< Used with Ohm's law solver. Stores external B field */ + hybrid_E_fp_external, /**< Used with Ohm's law solver. Stores external E field */ + Efield_cp, //!< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level + Bfield_cp, //!< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level + current_cp, //!< Only used with MR. The current that is used as a source for the field solver, on the coarse patch of each level + rho_cp, //!< Only used with MR. The charge density that is used as a source for the field solver, on the coarse patch of each level + F_cp, //!< Only used with MR. Used for divE cleaning, on the coarse patch of each level + G_cp, //!< Only used with MR. Used for divB cleaning, on the coarse patch of each level + Efield_cax, //!< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field + Bfield_cax, //!< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field + E_external_particle_field, //!< Stores external particle fields provided by the user as through an openPMD file + B_external_particle_field, //!< Stores external particle fields provided by the user as through an openPMD file + distance_to_eb, //!< Only used with embedded boundaries (EB). Stores the distance to the nearest EB + edge_lengths, //!< Only used with the ECT solver. Indicates the length of the cell edge that is covered by the EB, in SI units + face_areas, //!< Only used with the ECT solver. 
Indicates the area of the cell face that is covered by the EB, in SI units area_mod, pml_E_fp, pml_B_fp, @@ -102,6 +104,8 @@ namespace warpx::fields FieldType::hybrid_current_fp_temp, FieldType::hybrid_current_fp_plasma, FieldType::hybrid_current_fp_external, + FieldType::hybrid_B_fp_external, + FieldType::hybrid_E_fp_external, FieldType::Efield_cp, FieldType::Bfield_cp, FieldType::current_cp, diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index cf452df56a2..0e605697df2 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -1048,20 +1048,25 @@ WarpX::InitLevelData (int lev, Real /*time*/) } } -void WarpX::ComputeExternalFieldOnGridUsingParser ( - warpx::fields::FieldType field, +template <typename T> +void ComputeExternalFieldOnGridUsingParser_template ( + T field, amrex::ParserExecutor<4> const& fx_parser, amrex::ParserExecutor<4> const& fy_parser, amrex::ParserExecutor<4> const& fz_parser, int lev, PatchType patch_type, - amrex::Vector<std::array< std::unique_ptr<amrex::iMultiFab>,3 > > const& eb_update_field) + amrex::Vector<std::array< std::unique_ptr<amrex::iMultiFab>,3 > > const& eb_update_field, + bool use_eb_flags) { - auto t = gett_new(lev); + auto &warpx = WarpX::GetInstance(); + auto const &geom = warpx.Geom(lev); - auto dx_lev = geom[lev].CellSizeArray(); - const RealBox& real_box = geom[lev].ProbDomain(); + auto t = warpx.gett_new(lev); - amrex::IntVect refratio = (lev > 0 ) ? RefRatio(lev-1) : amrex::IntVect(1); + auto dx_lev = geom.CellSizeArray(); + const RealBox& real_box = geom.ProbDomain(); + + amrex::IntVect refratio = (lev > 0 ) ? WarpX::RefRatio(lev-1) : amrex::IntVect(1); if (patch_type == PatchType::coarse) { for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { dx_lev[idim] = dx_lev[idim] * refratio[idim]; @@ -1069,9 +1074,9 @@ void WarpX::ComputeExternalFieldOnGridUsingParser ( } using ablastr::fields::Direction; - amrex::MultiFab* mfx = m_fields.get(field, Direction{0}, lev); - amrex::MultiFab* mfy = m_fields.get(field, Direction{1}, lev); - amrex::MultiFab* mfz = m_fields.get(field, Direction{2}, lev); + amrex::MultiFab* mfx = warpx.m_fields.get(field, Direction{0}, lev); + amrex::MultiFab* mfy = warpx.m_fields.get(field, Direction{1}, lev); + amrex::MultiFab* mfz = warpx.m_fields.get(field, Direction{2}, lev); const amrex::IntVect x_nodal_flag = mfx->ixType().toIntVect(); const amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect(); @@ -1087,7 +1092,7 @@ ... auto const& mfzfab = mfz->array(mfi); amrex::Array4<int> update_fx_arr, update_fy_arr, update_fz_arr; - if (EB::enabled()) { + if (use_eb_flags && EB::enabled()) { update_fx_arr = eb_update_field[lev][0]->array(mfi); update_fy_arr = eb_update_field[lev][1]->array(mfi); update_fz_arr = eb_update_field[lev][2]->array(mfi); @@ -1181,6 +1186,68 @@ } } +void WarpX::ComputeExternalFieldOnGridUsingParser ( + warpx::fields::FieldType field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const& fz_parser, + int lev, PatchType patch_type, + amrex::Vector<std::array< std::unique_ptr<amrex::iMultiFab>,3 > > const& eb_update_field, + bool use_eb_flags) +{ + ComputeExternalFieldOnGridUsingParser_template ( + field, + fx_parser, fy_parser, fz_parser, + lev, patch_type, eb_update_field, + use_eb_flags); +} + +void WarpX::ComputeExternalFieldOnGridUsingParser ( + std::string const& field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const&
fz_parser, + int lev, PatchType patch_type, + amrex::Vector<std::array< std::unique_ptr<amrex::iMultiFab>,3 > > const& eb_update_field, + bool use_eb_flags) +{ + ComputeExternalFieldOnGridUsingParser_template ( + field, + fx_parser, fy_parser, fz_parser, + lev, patch_type, eb_update_field, + use_eb_flags); +} + +void WarpX::ComputeExternalFieldOnGridUsingParser ( + warpx::fields::FieldType field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const& fz_parser, + int lev, PatchType patch_type, + amrex::Vector<std::array< std::unique_ptr<amrex::iMultiFab>,3 > > const& eb_update_field) +{ + ComputeExternalFieldOnGridUsingParser_template ( + field, + fx_parser, fy_parser, fz_parser, + lev, patch_type, eb_update_field, + true); +} + +void WarpX::ComputeExternalFieldOnGridUsingParser ( + std::string const& field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const& fz_parser, + int lev, PatchType patch_type, + amrex::Vector<std::array< std::unique_ptr<amrex::iMultiFab>,3 > > const& eb_update_field) +{ + ComputeExternalFieldOnGridUsingParser_template ( + field, + fx_parser, fy_parser, fz_parser, + lev, patch_type, eb_update_field, + true); +} + void WarpX::CheckGuardCells() { for (int lev = 0; lev <= max_level; ++lev) @@ -1308,8 +1375,10 @@ void WarpX::CheckKnownIssues() mypc->m_B_ext_particle_s != "none" || mypc->m_E_ext_particle_s != "none" ); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - !external_particle_field_used, - "The hybrid-PIC algorithm does not work with external fields " + (!external_particle_field_used + || mypc->m_B_ext_particle_s == "parse_b_ext_particle_function" + || mypc->m_E_ext_particle_s == "parse_e_ext_particle_function"), + "The hybrid-PIC algorithm only works with analytical external E/B fields " "applied directly to particles." ); } diff --git a/Source/Particles/Gather/GetExternalFields.H b/Source/Particles/Gather/GetExternalFields.H index 7000d6d7c26..90a61bd25db 100644 --- a/Source/Particles/Gather/GetExternalFields.H +++ b/Source/Particles/Gather/GetExternalFields.H @@ -112,9 +112,9 @@ struct GetExternalEBField lab_time = m_gamma_boost*m_time + m_uz_boost*z*inv_c2; z = m_gamma_boost*z + m_uz_boost*m_time; } - Bx = m_Bxfield_partparser(x, y, z, lab_time); - By = m_Byfield_partparser(x, y, z, lab_time); - Bz = m_Bzfield_partparser(x, y, z, lab_time); + Bx = m_Bxfield_partparser((amrex::ParticleReal) x, (amrex::ParticleReal) y, (amrex::ParticleReal) z, lab_time); + By = m_Byfield_partparser((amrex::ParticleReal) x, (amrex::ParticleReal) y, (amrex::ParticleReal) z, lab_time); + Bz = m_Bzfield_partparser((amrex::ParticleReal) x, (amrex::ParticleReal) y, (amrex::ParticleReal) z, lab_time); } if (m_Etype == RepeatedPlasmaLens || diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp index 870a3a87c91..5b4b07af07b 100644 --- a/Source/Python/WarpX.cpp +++ b/Source/Python/WarpX.cpp @@ -270,6 +270,10 @@ The physical fields in WarpX have the following naming: [] (WarpX& wx) { wx.ProjectionCleanDivB(); }, "Executes projection based divergence cleaner on loaded Bfield_fp_external." ) + .def_static("calculate_hybrid_external_curlA", + [] (WarpX& wx) { wx.CalculateExternalCurlA(); }, + "Executes calculation of the curl of the external A in the hybrid solver." + ) .def("synchronize", [] (WarpX& wx) { wx.Synchronize(); }, "Synchronize particle velocities and positions.
diff --git a/Source/WarpX.H b/Source/WarpX.H index 077e8f5d954..e16ab757f77 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -164,6 +164,7 @@ public: MultiDiagnostics& GetMultiDiags () {return *multi_diags;} ParticleBoundaryBuffer& GetParticleBoundaryBuffer () { return *m_particle_boundary_buffer; } amrex::Vector<std::array< std::unique_ptr<amrex::iMultiFab>,3 > >& GetEBUpdateEFlag() { return m_eb_update_E; } + amrex::Vector<std::array< std::unique_ptr<amrex::iMultiFab>,3 > >& GetEBUpdateBFlag() { return m_eb_update_B; } amrex::Vector< std::unique_ptr<amrex::iMultiFab> > const & GetEBReduceParticleShapeFlag() const { return m_eb_reduce_particle_shape; } static void shiftMF (amrex::MultiFab& mf, const amrex::Geometry& geom, @@ -848,6 +849,7 @@ public: void ComputeDivE(amrex::MultiFab& divE, int lev); void ProjectionCleanDivB (); + void CalculateExternalCurlA (); [[nodiscard]] amrex::IntVect getngEB() const { return guard_cells.ng_alloc_EB; } [[nodiscard]] amrex::IntVect getngF() const { return guard_cells.ng_alloc_F; } @@ -892,14 +894,24 @@ public: * on the staggered yee-grid or cell-centered grid, in the interior cells * and guard cells. * - * \param[in] field FieldType + * \param[in] field FieldType to grab from register to write into * \param[in] fx_parser parser function to initialize x-field * \param[in] fy_parser parser function to initialize y-field * \param[in] fz_parser parser function to initialize z-field * \param[in] lev level of the Multifabs that is initialized * \param[in] patch_type PatchType on which the field is initialized (fine or coarse) * \param[in] eb_update_field flag indicating which gridpoints should be modified by this functions + * \param[in] use_eb_flags (default:true) flag indicating if eb points should be excluded or not */ + void ComputeExternalFieldOnGridUsingParser ( + warpx::fields::FieldType field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const& fz_parser, + int lev, PatchType patch_type, + amrex::Vector<std::array< std::unique_ptr<amrex::iMultiFab>,3 > > const& eb_update_field, + bool use_eb_flags); + void ComputeExternalFieldOnGridUsingParser ( warpx::fields::FieldType field, amrex::ParserExecutor<4> const& fx_parser, @@ -908,6 +920,41 @@ public: int lev, PatchType patch_type, amrex::Vector<std::array< std::unique_ptr<amrex::iMultiFab>,3 > > const& eb_update_field); + /** + * \brief + * This function computes the E, B, and J fields on each level + * using the parser and the user-defined function for the external fields. + * The subroutine will parse the x_/y_/z_external_grid_function and + * then the field multifab is initialized based on the (x,y,z) position + * on the staggered yee-grid or cell-centered grid, in the interior cells + * and guard cells.
+ * + * \param[in] field string containing field name to grab from register + * \param[in] fx_parser parser function to initialize x-field + * \param[in] fy_parser parser function to initialize y-field + * \param[in] fz_parser parser function to initialize z-field + * \param[in] lev level of the Multifabs that is initialized + * \param[in] patch_type PatchType on which the field is initialized (fine or coarse) + * \param[in] eb_update_field flag indicating which gridpoints should be modified by this function + * \param[in] use_eb_flags (default:true) flag indicating if eb points should be excluded or not + */ + void ComputeExternalFieldOnGridUsingParser ( + std::string const& field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const& fz_parser, + int lev, PatchType patch_type, + amrex::Vector< std::array< std::unique_ptr<amrex::iMultiFab>,3> > const& eb_update_field, + bool use_eb_flags); + + void ComputeExternalFieldOnGridUsingParser ( + std::string const& field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const& fz_parser, + int lev, PatchType patch_type, + amrex::Vector< std::array< std::unique_ptr<amrex::iMultiFab>,3> > const& eb_update_field); + /** * \brief Load field values from a user-specified openPMD file, * for the fields Ex, Ey, Ez, Bx, By, Bz diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index a1eac8d6080..fe89b04807e 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -768,7 +768,7 @@ WarpX::ReadParameters () use_kspace_filter = use_filter; use_filter = false; } - else // FDTD + else if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::HybridPIC) { // Filter currently not working with FDTD solver in RZ geometry along R // (see https://github.com/ECP-WarpX/WarpX/issues/1943) @@ -2282,8 +2282,9 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm { m_hybrid_pic_model->AllocateLevelMFs( m_fields, - lev, ba, dm, ncomps, ngJ, ngRho, jx_nodal_flag, jy_nodal_flag, - jz_nodal_flag, rho_nodal_flag + lev, ba, dm, ncomps, ngJ, ngRho, ngEB, jx_nodal_flag, jy_nodal_flag, + jz_nodal_flag, rho_nodal_flag, Ex_nodal_flag, Ey_nodal_flag, Ez_nodal_flag, + Bx_nodal_flag, By_nodal_flag, Bz_nodal_flag ); }
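For illustration, a minimal inputs fragment exercising the new runtime options might look as follows; the coil name and all numeric values are placeholders, not part of the patch. Note that the grid functions may not depend on t (the InitData assert above enforces this); time variation enters only through A_time_external_function(t):

hybrid_pic_model.add_external_fields = true
hybrid_pic_model.holmstrom_vacuum_region = true
external_vector_potential.fields = coil
external_vector_potential.coil.read_from_file = false
external_vector_potential.coil.Ax_external_grid_function(x,y,z) = "0"
external_vector_potential.coil.Ay_external_grid_function(x,y,z) = "0"
external_vector_potential.coil.Az_external_grid_function(x,y,z) = "1.0e-3*(x*x + y*y)"
external_vector_potential.coil.A_time_external_function(t) = "sin(pi*t/1.0e-6)"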
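UpdateHybridExternalFields applies B_ext = f(t) * curl(A) and E_ext = -f'(t) * A, with f'(t) approximated by the centered finite difference used above. A self-contained C++ sketch of that stencil, with a hypothetical stand-in envelope (not the WarpX API):

#include <cmath>
#include <cstdio>

// Stand-in for the user-supplied A_time_external_function(t)
double envelope (double t) { return std::sin(t); }

int main () {
    const double t = 0.3, dt = 1.0e-3;
    // B_ext scales with the envelope value itself ...
    const double scale_factor_B = envelope(t);
    // ... while E_ext scales with -df/dt, from a centered difference about t
    const double sf_l = envelope(t - 0.5*dt);
    const double sf_r = envelope(t + 0.5*dt);
    const double scale_factor_E = -(sf_r - sf_l)/dt;
    std::printf("scale_B = %.6f, scale_E = %.6f (exact -f'(t) = %.6f)\n",
                scale_factor_B, scale_factor_E, -std::cos(t));
    return 0;
}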
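Per E component, the vacuum-region handling added to the Ohm's law kernels reduces to the scalar logic below (an illustrative sketch, not the WarpX kernel itself): with holmstrom_vacuum_region enabled, cells below the density floor get E = 0 outright; otherwise the density is clamped to the floor before the division, as before.

#include <algorithm>

double ohm_E (double enE, double grad_Pe, double rho,
              double rho_floor, bool holmstrom_vacuum_region)
{
    // Holmstrom handling: suppress E entirely in near-vacuum cells
    if (rho < rho_floor && holmstrom_vacuum_region) { return 0.0; }
    // Default handling: clamp the density before dividing
    const double rho_limited = std::max(rho, rho_floor);
    return (enE - grad_Pe) / rho_limited;
}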