diff --git a/.gitignore b/.gitignore
index 57ac21eed..53cdf57bb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -48,3 +48,7 @@ results.json
 results.stats.csv
 results.csv
 results.sysinfo.txt
+*.mp4
+*.out
+*.err
+ext-examples/
diff --git a/docs/MeshGeneration/StructuredMesh.md b/docs/MeshGeneration/StructuredMesh.md
index 67441ba80..eff24038e 100644
--- a/docs/MeshGeneration/StructuredMesh.md
+++ b/docs/MeshGeneration/StructuredMesh.md
@@ -56,7 +56,7 @@ You can set boundary conditions for each of the four sides of the structured mes
 * `SELF_BC_PRESCRIBED`
 * `SELF_BC_RADIATION`
 
-The tiled layout is convenient for domain decomposition, when you are wanting to scale up your application for distributed memory platforms. You can further enable domain decomposition by setting the optional `enableDomainDecompisition` input to `.true.` . In this case, when you launch your application with `mpirun`, the domain will be automatically divided as evenly as possible across all MPI ranks.
+The tiled layout is convenient for domain decomposition when you want to scale your application up to distributed memory platforms. Domain decomposition is enabled automatically when you launch your application with `mpirun`, `mpiexec`, or `srun` using more than one rank; in this case, the domain is divided as evenly as possible across all MPI ranks.
 
 !!! note
     It's good practice to set the total number of tiles equal to the number of MPI ranks that you are running with. Alternatively, you can use fairly small tiles when working with a large number of MPI ranks to increase the chance of minimizing point-to-point communications .
@@ -66,7 +66,7 @@ In the example below, we create a 2-D mesh with the following attributes
 * $2 × 2$ tiles for the domain
 * $10 × 10$ elements per tile
 * Each element is has dimensions of $0.05 × 0.05$. The domain dimensions are then $L_x × L_y = 1 × 1$
-* Domain decomposition is enabled
+* Domain decomposition is enabled automatically when the application is launched with more than one MPI rank
 
 The geometry fields are created from the mesh information and a $7^{th}$ degree interpolant through the Legendre-Gauss points.
 
@@ -124,7 +124,7 @@ You can set boundary conditions for each of the four sides of the structured mes
 * `SELF_BC_PRESCRIBED`
 * `SELF_BC_RADIATION`
 
-The tiled layout is convenient for domain decomposition, when you are wanting to scale up your application for distributed memory platforms. You can further enable domain decomposition by setting the optional `enableDomainDecompisition` input to `.true.` . In this case, when you launch your application with `mpirun`, the domain will be automatically divided as evenly as possible across all MPI ranks.
+The tiled layout is convenient for domain decomposition when you want to scale your application up to distributed memory platforms. Domain decomposition is enabled automatically when you launch your application with `mpirun`, `mpiexec`, or `srun` using more than one rank; in this case, the domain is divided as evenly as possible across all MPI ranks.
 
 !!! note
     It's good practice to set the total number of tiles equal to the number of MPI ranks that you are running with. Alternatively, you can use fairly small tiles when working with a large number of MPI ranks to increase the chance of minimizing point-to-point communications .
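
For reference, the documented 2-D example now reduces to a single call with no decomposition flag. A minimal sketch, assuming the mesh object and the `SELF_BC_PRESCRIBED` boundary-condition constants used in the tests later in this patch (the declarations here are illustrative, not copied from the docs file):

  ! 2 x 2 tiles, 10 x 10 elements per tile, dx = dy = 0.05 => a 1 x 1 domain.
  ! Decomposition is chosen automatically from the number of MPI ranks at launch.
  integer :: bcids(1:4)

  bcids(1:4) = [SELF_BC_PRESCRIBED, & ! South
                SELF_BC_PRESCRIBED, & ! East
                SELF_BC_PRESCRIBED, & ! North
                SELF_BC_PRESCRIBED]   ! West
  call mesh%StructuredMesh(10,10,2,2,0.05_prec,0.05_prec,bcids)
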
diff --git a/src/SELF_DomainDecomposition_t.f90 b/src/SELF_DomainDecomposition_t.f90
index 9ef1bfab4..a2f591185 100644
--- a/src/SELF_DomainDecomposition_t.f90
+++ b/src/SELF_DomainDecomposition_t.f90
@@ -63,12 +63,9 @@ module SELF_DomainDecomposition_t
 
 contains
 
-  subroutine Init_DomainDecomposition_t(this,enableMPI)
-#undef __FUNC__
-#define __FUNC__ "Init_DomainDecomposition_t"
+  subroutine Init_DomainDecomposition_t(this)
     implicit none
     class(DomainDecomposition_t),intent(inout) :: this
-    logical,intent(in) :: enableMPI
     ! Local
     integer :: ierror
 
@@ -77,17 +74,19 @@ subroutine Init_DomainDecomposition_t(this,enableMPI)
     this%rankId = 0
     this%nRanks = 1
     this%nElem = 0
-    this%mpiEnabled = enableMPI
-
-    if(enableMPI) then
-      this%mpiComm = MPI_COMM_WORLD
-      print*,__FILE__," : Initializing MPI"
-      call mpi_init(ierror)
-      call mpi_comm_rank(this%mpiComm,this%rankId,ierror)
-      call mpi_comm_size(this%mpiComm,this%nRanks,ierror)
-      print*,__FILE__," : Rank ",this%rankId+1,"/",this%nRanks," checking in."
+    this%mpiEnabled = .false.
+
+    this%mpiComm = MPI_COMM_WORLD
+    print*,__FILE__," : Initializing MPI"
+    call mpi_init(ierror)
+    call mpi_comm_rank(this%mpiComm,this%rankId,ierror)
+    call mpi_comm_size(this%mpiComm,this%nRanks,ierror)
+    print*,__FILE__," : Rank ",this%rankId+1,"/",this%nRanks," checking in."
+
+    if(this%nRanks > 1) then
+      this%mpiEnabled = .true.
     else
-      print*,__FILE__," : MPI not initialized. No domain decomposition used."
+      print*,__FILE__," : No domain decomposition used."
     endif
 
     if(prec == real32) then
@@ -120,10 +119,10 @@ subroutine Free_DomainDecomposition_t(this)
     if(allocated(this%requests)) deallocate(this%requests)
     if(allocated(this%stats)) deallocate(this%stats)
 
-    if(this%mpiEnabled) then
-      print*,__FILE__," : Rank ",this%rankId+1,"/",this%nRanks," checking out."
-      call MPI_FINALIZE(ierror)
-    endif
+    !if(this%mpiEnabled) then
+    print*,__FILE__," : Rank ",this%rankId+1,"/",this%nRanks," checking out."
+    call MPI_FINALIZE(ierror)
+    !endif
 
   endsubroutine Free_DomainDecomposition_t
 
diff --git a/src/SELF_Mesh_1D.f90 b/src/SELF_Mesh_1D.f90
index 6b41d893a..b9708b0c8 100644
--- a/src/SELF_Mesh_1D.f90
+++ b/src/SELF_Mesh_1D.f90
@@ -84,7 +84,7 @@ subroutine Init_Mesh1D(this,nElem,nNodes,nBCs)
     allocate(this%BCType(1:4,1:nBCs))
     allocate(this%BCNames(1:nBCs))
 
-    call this%decomp%Init(.false.)
+    call this%decomp%Init()
 
   endsubroutine Init_Mesh1D
 
diff --git a/src/SELF_Mesh_2D_t.f90 b/src/SELF_Mesh_2D_t.f90
index 224dc4bc6..31d020d6e 100644
--- a/src/SELF_Mesh_2D_t.f90
+++ b/src/SELF_Mesh_2D_t.f90
@@ -227,7 +227,7 @@ subroutine ResetBoundaryConditionType_Mesh2D_t(this,bcid)
 
   endsubroutine ResetBoundaryConditionType_Mesh2D_t
 
-  subroutine UniformStructuredMesh_Mesh2D_t(this,nxPerTile,nyPerTile,nTileX,nTileY,dx,dy,bcids,enableDomainDecomposition)
+  subroutine UniformStructuredMesh_Mesh2D_t(this,nxPerTile,nyPerTile,nTileX,nTileY,dx,dy,bcids)
     !!
     !! Create a structured mesh and store it in SELF's unstructured mesh format.
     !! The mesh is created in tiles of size (tnx,tny). Tiling is used to determine
@@ -263,7 +263,6 @@ subroutine UniformStructuredMesh_Mesh2D_t(this,nxPerTile,nyPerTile,nTileX,nTileY
     real(prec),intent(in) :: dx
     real(prec),intent(in) :: dy
     integer,intent(in) :: bcids(1:4)
-    logical,optional,intent(in) :: enableDomainDecomposition
     ! Local
     integer :: nX,nY,nGeo,nBCs
     integer :: nGlobalElem
@@ -281,11 +280,8 @@ subroutine UniformStructuredMesh_Mesh2D_t(this,nxPerTile,nyPerTile,nTileX,nTileY
     integer :: e1,e2
     integer :: nedges
 
-    if(present(enableDomainDecomposition)) then
-      call this%decomp%init(enableDomainDecomposition)
-    else
-      call this%decomp%init(.false.)
-    endif
+    call this%decomp%init()
+
     nX = nTileX*nxPerTile
     nY = nTileY*nyPerTile
     nGeo = 1 ! Force the geometry to be linear
@@ -459,13 +455,12 @@ subroutine UniformStructuredMesh_Mesh2D_t(this,nxPerTile,nyPerTile,nTileX,nTileY
 
   endsubroutine UniformStructuredMesh_Mesh2D_t
 
-  subroutine Read_HOPr_Mesh2D_t(this,meshFile,enableDomainDecomposition)
+  subroutine Read_HOPr_Mesh2D_t(this,meshFile)
     ! From https://www.hopr-project.org/externals/Meshformat.pdf, Algorithm 6
     ! Adapted for 2D Mesh : Note that HOPR does not have 2D mesh output.
     implicit none
     class(Mesh2D_t),intent(out) :: this
     character(*),intent(in) :: meshFile
-    logical,intent(in),optional :: enableDomainDecomposition
     ! Local
     integer(HID_T) :: fileId
     integer(HID_T) :: offset(1:2),gOffset(1)
@@ -489,11 +484,7 @@ subroutine Read_HOPr_Mesh2D_t(this,meshFile,enableDomainDecomposition)
     integer,dimension(:),allocatable :: hopr_globalNodeIDs
     integer,dimension(:,:),allocatable :: bcType
 
-    if(present(enableDomainDecomposition)) then
-      call this%decomp%init(enableDomainDecomposition)
-    else
-      call this%decomp%init(.false.)
-    endif
+    call this%decomp%init()
 
     print*,__FILE__//' : Reading HOPr mesh from'//trim(meshfile)
     if(this%decomp%mpiEnabled) then
diff --git a/src/SELF_Mesh_3D_t.f90 b/src/SELF_Mesh_3D_t.f90
index dddc19aa3..c415c0022 100644
--- a/src/SELF_Mesh_3D_t.f90
+++ b/src/SELF_Mesh_3D_t.f90
@@ -346,7 +346,7 @@ pure function elementid(i,j,k,ti,tj,tk,nxpertile,nypertile,nzpertile, &
   endfunction elementid
 
   subroutine UniformStructuredMesh_Mesh3D_t(this,nxPerTile,nyPerTile,nzPerTile, &
-                                            nTileX,nTileY,nTileZ,dx,dy,dz,bcids,enableDomainDecomposition)
+                                            nTileX,nTileY,nTileZ,dx,dy,dz,bcids)
     !!
     !! Create a structured mesh and store it in SELF's unstructured mesh format.
     !! The mesh is created in tiles of size (tnx,tny,tnz). Tiling is used to determine
@@ -388,7 +388,6 @@ subroutine UniformStructuredMesh_Mesh3D_t(this,nxPerTile,nyPerTile,nzPerTile, &
     real(prec),intent(in) :: dy
     real(prec),intent(in) :: dz
     integer,intent(in) :: bcids(1:6)
-    logical,optional,intent(in) :: enableDomainDecomposition
     ! Local
     integer :: nX,nY,nZ,nGeo,nBCs
     integer :: nGlobalElem
@@ -406,11 +405,8 @@ subroutine UniformStructuredMesh_Mesh3D_t(this,nxPerTile,nyPerTile,nzPerTile, &
     integer :: e1,e2,s1,s2
     integer :: nfaces
 
-    if(present(enableDomainDecomposition)) then
-      call this%decomp%init(enableDomainDecomposition)
-    else
-      call this%decomp%init(.false.)
-    endif
+    call this%decomp%init()
+
     nX = nTileX*nxPerTile
     nY = nTileY*nyPerTile
     nZ = nTileZ*nzPerTile
@@ -699,12 +695,11 @@ subroutine UniformStructuredMesh_Mesh3D_t(this,nxPerTile,nyPerTile,nzPerTile, &
 
   endsubroutine UniformStructuredMesh_Mesh3D_t
 
-  subroutine Read_HOPr_Mesh3D_t(this,meshFile,enableDomainDecomposition)
+  subroutine Read_HOPr_Mesh3D_t(this,meshFile)
     ! From https://www.hopr-project.org/externals/Meshformat.pdf, Algorithm 6
     implicit none
     class(Mesh3D_t),intent(out) :: this
     character(*),intent(in) :: meshFile
-    logical,intent(in),optional :: enableDomainDecomposition
     ! Local
     integer(HID_T) :: fileId
     integer(HID_T) :: offset(1:2),gOffset(1)
@@ -725,11 +720,7 @@ subroutine Read_HOPr_Mesh3D_t(this,meshFile,enableDomainDecomposition)
     integer,dimension(:),allocatable :: hopr_globalNodeIDs
     integer,dimension(:,:),allocatable :: bcType
 
-    if(present(enableDomainDecomposition)) then
-      call this%decomp%init(enableDomainDecomposition)
-    else
-      call this%decomp%init(.false.)
-    endif
+    call this%decomp%init()
 
     if(this%decomp%mpiEnabled) then
       call Open_HDF5(meshFile,H5F_ACC_RDONLY_F,fileId,this%decomp%mpiComm)
diff --git a/src/gpu/SELF_DomainDecomposition.f90 b/src/gpu/SELF_DomainDecomposition.f90
index e2e9124e5..586fd88c7 100644
--- a/src/gpu/SELF_DomainDecomposition.f90
+++ b/src/gpu/SELF_DomainDecomposition.f90
@@ -46,10 +46,9 @@ module SELF_DomainDecomposition
 
 contains
 
-  subroutine Init_DomainDecomposition(this,enableMPI)
+  subroutine Init_DomainDecomposition(this)
     implicit none
    class(DomainDecomposition),intent(inout) :: this
-    logical,intent(in) :: enableMPI
     ! Local
     integer :: ierror
     integer(c_int) :: num_devices,hip_err,device_id
@@ -59,17 +58,19 @@ subroutine Init_DomainDecomposition(this,enableMPI)
     this%rankId = 0
     this%nRanks = 1
     this%nElem = 0
-    this%mpiEnabled = enableMPI
-
-    if(enableMPI) then
-      this%mpiComm = MPI_COMM_WORLD
-      print*,__FILE__," : Initializing MPI"
-      call mpi_init(ierror)
-      call mpi_comm_rank(this%mpiComm,this%rankId,ierror)
-      call mpi_comm_size(this%mpiComm,this%nRanks,ierror)
-      print*,__FILE__," : Rank ",this%rankId+1,"/",this%nRanks," checking in."
+    this%mpiEnabled = .false.
+
+    this%mpiComm = MPI_COMM_WORLD
+    print*,__FILE__," : Initializing MPI"
+    call mpi_init(ierror)
+    call mpi_comm_rank(this%mpiComm,this%rankId,ierror)
+    call mpi_comm_size(this%mpiComm,this%nRanks,ierror)
+    print*,__FILE__," : Rank ",this%rankId+1,"/",this%nRanks," checking in."
+
+    if(this%nRanks > 1) then
+      this%mpiEnabled = .true.
     else
-      print*,__FILE__," : MPI not initialized. No domain decomposition used."
+      print*,__FILE__," : No domain decomposition used."
     endif
 
     if(prec == real32) then
@@ -115,10 +116,8 @@ subroutine Free_DomainDecomposition(this)
     if(allocated(this%requests)) deallocate(this%requests)
     if(allocated(this%stats)) deallocate(this%stats)
 
-    if(this%mpiEnabled) then
-      print*,__FILE__," : Rank ",this%rankId+1,"/",this%nRanks," checking out."
-      call MPI_FINALIZE(ierror)
-    endif
+    print*,__FILE__," : Rank ",this%rankId+1,"/",this%nRanks," checking out."
+    call MPI_FINALIZE(ierror)
 
   endsubroutine Free_DomainDecomposition
 
diff --git a/test/advection_diffusion_2d_rk3_mpi.f90 b/test/advection_diffusion_2d_rk3_mpi.f90
index c2a48251d..5fa72b177 100644
--- a/test/advection_diffusion_2d_rk3_mpi.f90
+++ b/test/advection_diffusion_2d_rk3_mpi.f90
@@ -48,7 +48,7 @@ program advection_diffusion_2d_rk3
 
   ! Create a uniform block mesh
   call get_environment_variable("WORKSPACE",WORKSPACE)
-  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block2D/Block2D_mesh.h5",enableDomainDecomposition=.true.)
+  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block2D/Block2D_mesh.h5")
 
   ! Create an interpolant
   call interp%Init(N=controlDegree, &
diff --git a/test/advection_diffusion_2d_rk3_pickup_mpi.f90 b/test/advection_diffusion_2d_rk3_pickup_mpi.f90
index 8ee8a37f1..6f830bd39 100644
--- a/test/advection_diffusion_2d_rk3_pickup_mpi.f90
+++ b/test/advection_diffusion_2d_rk3_pickup_mpi.f90
@@ -48,7 +48,7 @@ program advection_diffusion_2d_rk3
 
   ! Create a uniform block mesh
   call get_environment_variable("WORKSPACE",WORKSPACE)
-  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block2D/Block2D_mesh.h5",enableDomainDecomposition=.true.)
+  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block2D/Block2D_mesh.h5")
 
   ! Create an interpolant
   call interp%Init(N=controlDegree, &
diff --git a/test/advection_diffusion_3d_rk3_mpi.f90 b/test/advection_diffusion_3d_rk3_mpi.f90
index ef38e488a..22479518b 100644
--- a/test/advection_diffusion_3d_rk3_mpi.f90
+++ b/test/advection_diffusion_3d_rk3_mpi.f90
@@ -49,7 +49,7 @@ program advection_diffusion_3d_rk3
 
   ! Create a uniform block mesh
   call get_environment_variable("WORKSPACE",WORKSPACE)
-  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block3D/Block3D_mesh.h5",enableDomainDecomposition=.true.)
+  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block3D/Block3D_mesh.h5")
 
   ! Create an interpolant
   call interp%Init(N=controlDegree, &
diff --git a/test/advection_diffusion_3d_rk3_pickup_mpi.f90 b/test/advection_diffusion_3d_rk3_pickup_mpi.f90
index d5124cf81..479a070a8 100644
--- a/test/advection_diffusion_3d_rk3_pickup_mpi.f90
+++ b/test/advection_diffusion_3d_rk3_pickup_mpi.f90
@@ -49,7 +49,7 @@ program advection_diffusion_3d_rk3
 
   ! Create a uniform block mesh
   call get_environment_variable("WORKSPACE",WORKSPACE)
-  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block3D/Block3D_mesh.h5",enableDomainDecomposition=.true.)
+  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block3D/Block3D_mesh.h5")
 
   ! Create an interpolant
   call interp%Init(N=controlDegree, &
diff --git a/test/mappedscalarbrgradient_2d_linear_mpi.f90 b/test/mappedscalarbrgradient_2d_linear_mpi.f90
index c621d40c2..5d42c6541 100644
--- a/test/mappedscalarbrgradient_2d_linear_mpi.f90
+++ b/test/mappedscalarbrgradient_2d_linear_mpi.f90
@@ -67,7 +67,7 @@ integer function mappedscalarbrgradient_2d_linear() result(r)
 
   ! Create a uniform block mesh
   call get_environment_variable("WORKSPACE",WORKSPACE)
-  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block2D/Block2D_mesh.h5",enableDomainDecomposition=.true.)
+  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block2D/Block2D_mesh.h5")
 
   ! Create an interpolant
   call interp%Init(N=controlDegree, &
diff --git a/test/mappedscalarbrgradient_3d_linear_mpi.f90 b/test/mappedscalarbrgradient_3d_linear_mpi.f90
index c72ffeb18..d11b7f0bf 100644
--- a/test/mappedscalarbrgradient_3d_linear_mpi.f90
+++ b/test/mappedscalarbrgradient_3d_linear_mpi.f90
@@ -69,7 +69,7 @@ integer function mappedscalarbrgradient_3d_linear() result(r)
 
   ! Create a uniform block mesh
   call get_environment_variable("WORKSPACE",WORKSPACE)
-  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block3D/Block3D_mesh.h5",enableDomainDecomposition=.true.)
+  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block3D/Block3D_mesh.h5")
 
   ! Create an interpolant
   call interp%Init(N=controlDegree, &
diff --git a/test/mappedvectordgdivergence_2d_linear_mpi.f90 b/test/mappedvectordgdivergence_2d_linear_mpi.f90
index a330cf9c0..89e60989f 100644
--- a/test/mappedvectordgdivergence_2d_linear_mpi.f90
+++ b/test/mappedvectordgdivergence_2d_linear_mpi.f90
@@ -65,7 +65,7 @@ integer function mappedvectordgdivergence_2d_linear() result(r)
 
   ! Create a uniform block mesh
   call get_environment_variable("WORKSPACE",WORKSPACE)
-  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block2D/Block2D_mesh.h5",enableDomainDecomposition=.true.)
+  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block2D/Block2D_mesh.h5")
 
   ! Create an interpolant
   call interp%Init(N=controlDegree, &
diff --git a/test/mappedvectordgdivergence_2d_linear_sideexchange_mpi.f90 b/test/mappedvectordgdivergence_2d_linear_sideexchange_mpi.f90
index 42eed794c..f5bb9724e 100644
--- a/test/mappedvectordgdivergence_2d_linear_sideexchange_mpi.f90
+++ b/test/mappedvectordgdivergence_2d_linear_sideexchange_mpi.f90
@@ -65,7 +65,7 @@ integer function mappedvectordgdivergence_2d_linear() result(r)
 
   ! Create a uniform block mesh
   call get_environment_variable("WORKSPACE",WORKSPACE)
-  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block2D/Block2D_mesh.h5",enableDomainDecomposition=.true.)
+  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block2D/Block2D_mesh.h5")
 
   ! Create an interpolant
   call interp%Init(N=controlDegree, &
diff --git a/test/mappedvectordgdivergence_2d_linear_structuredmesh_mpi.f90 b/test/mappedvectordgdivergence_2d_linear_structuredmesh_mpi.f90
index d5d5632a5..484fd0ce5 100644
--- a/test/mappedvectordgdivergence_2d_linear_structuredmesh_mpi.f90
+++ b/test/mappedvectordgdivergence_2d_linear_structuredmesh_mpi.f90
@@ -69,7 +69,7 @@ integer function mappedvectordgdivergence_2d_linear() result(r)
                 SELF_BC_PRESCRIBED, & ! East
                 SELF_BC_PRESCRIBED, & ! North
                 SELF_BC_PRESCRIBED]   ! West
-  call mesh%StructuredMesh(10,10,2,2,0.05_prec,0.05_prec,bcids,enableDomainDecomposition=.true.)
+  call mesh%StructuredMesh(10,10,2,2,0.05_prec,0.05_prec,bcids)
 
   ! Create an interpolant
   call interp%Init(N=controlDegree, &
diff --git a/test/mappedvectordgdivergence_3d_linear_mpi.f90 b/test/mappedvectordgdivergence_3d_linear_mpi.f90
index 3187c1152..92c8f88d1 100644
--- a/test/mappedvectordgdivergence_3d_linear_mpi.f90
+++ b/test/mappedvectordgdivergence_3d_linear_mpi.f90
@@ -65,7 +65,7 @@ integer function mappedvectordgdivergence_3d_linear() result(r)
 
   ! Create a uniform block mesh
   call get_environment_variable("WORKSPACE",WORKSPACE)
-  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block3D/Block3D_mesh.h5",enableDomainDecomposition=.true.)
+  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block3D/Block3D_mesh.h5")
 
   ! Create an interpolant
   call interp%Init(N=controlDegree, &
diff --git a/test/mappedvectordgdivergence_3d_linear_sideexchange_mpi.f90 b/test/mappedvectordgdivergence_3d_linear_sideexchange_mpi.f90
index b48d5402b..62f59173e 100644
--- a/test/mappedvectordgdivergence_3d_linear_sideexchange_mpi.f90
+++ b/test/mappedvectordgdivergence_3d_linear_sideexchange_mpi.f90
@@ -65,7 +65,7 @@ integer function mappedvectordgdivergence_3d_linear() result(r)
 
   ! Create a uniform block mesh
   call get_environment_variable("WORKSPACE",WORKSPACE)
-  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block3D/Block3D_mesh.h5",enableDomainDecomposition=.true.)
+  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block3D/Block3D_mesh.h5")
 
   ! Create an interpolant
   call interp%Init(N=controlDegree, &
diff --git a/test/mappedvectordgdivergence_3d_linear_structuredmesh_mpi.f90 b/test/mappedvectordgdivergence_3d_linear_structuredmesh_mpi.f90
index 9a4b2fbfe..d8d13418b 100644
--- a/test/mappedvectordgdivergence_3d_linear_structuredmesh_mpi.f90
+++ b/test/mappedvectordgdivergence_3d_linear_structuredmesh_mpi.f90
@@ -74,7 +74,7 @@ integer function mappedvectordgdivergence_3d_linear() result(r)
   call mesh%StructuredMesh(5,5,5, &
                            2,2,2, &
                            0.1_prec,0.1_prec,0.1_prec, &
-                           bcids,enableDomainDecomposition=.true.)
+                           bcids)
 
   ! Create an interpolant
   call interp%Init(N=controlDegree, &
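
Taken together, a driver now builds its mesh with no decomposition argument and lets the launcher decide. A minimal usage sketch, assuming an executable built from one of the drivers above and the same `WORKSPACE` mesh-file convention used in the tests:

  ! Single-rank run: decomp%Init() sees nRanks == 1, so mpiEnabled stays .false.
  ! Multi-rank run (e.g. `mpirun -np 4 ./app`): nRanks > 1, so the domain is
  ! split as evenly as possible across the ranks with no change to the driver.
  call mesh%Read_HOPr(trim(WORKSPACE)//"/share/mesh/Block2D/Block2D_mesh.h5")
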