Better implementation of MPI grids (#258)
* working on parallel grid improvements

* communicate indices of entities after local grid creation

* move entity index communication into ParallelGrid::new

* simplifications

* parallel single element grid

* None,

* use hashmaps instead of pairs of vecs (see the sketch after this list)

* use trait for parallel grid setup instead of code duplication

* add extra_cell_info

* working on mixed grid

* make mixed grid work

* pub(crate) for internal implementation crate

* remove unused code, and use pub(crate) for internal functions

* more pub(crate)
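
The hashmap bullet above is the easiest of these to picture. A minimal before/after sketch, assuming the entity-index lookups were previously kept as a pair of position-aligned vectors; the names and signatures here are illustrative, not the crate's actual API:

    use std::collections::HashMap;

    // Before: two vectors that must stay aligned by position; every
    // lookup is a linear scan.
    fn local_index_from_vecs(global: &[usize], local: &[usize], g: usize) -> Option<usize> {
        global.iter().position(|&x| x == g).map(|i| local[i])
    }

    // After: a single HashMap; lookup is O(1) and the two halves of
    // each (global, local) pair can no longer drift apart.
    fn local_index_from_map(map: &HashMap<usize, usize>, g: usize) -> Option<usize> {
        map.get(&g).copied()
    }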
mscroggs authored May 16, 2024
1 parent dc022c4 commit dc484d5
Showing 17 changed files with 934 additions and 1,216 deletions.
examples/test_parallel_grid.rs (62 changes: 7 additions & 55 deletions)
@@ -10,16 +10,15 @@ use bempp::{
     function::{ParallelFunctionSpace, SerialFunctionSpace},
     grid::{
         flat_triangle_grid::{FlatTriangleGrid, FlatTriangleGridBuilder},
-        //mixed_grid::{MixedGrid, MixedGridBuilder},
+        mixed_grid::{MixedGrid, MixedGridBuilder},
         parallel_grid::ParallelGrid,
-        //single_element_grid::{SingleElementGrid, SingleElementGridBuilder},
+        single_element_grid::{SingleElementGrid, SingleElementGridBuilder},
     },
     traits::{
         element::Continuity,
         function::FunctionSpace,
         grid::{Builder, CellType, GeometryType, GridType, ParallelBuilder, PointType},
-        types::Ownership,
-        // types::{Ownership, ReferenceCellType},
+        types::{Ownership, ReferenceCellType},
     },
 };
 #[cfg(feature = "mpi")]
@@ -29,8 +28,7 @@ use mpi::{
     traits::{Communicator, Destination, Source},
 };
 #[cfg(feature = "mpi")]
-use rlst::CsrMatrix;
-// use rlst::{CsrMatrix, Shape};
+use rlst::{CsrMatrix, Shape};
 #[cfg(feature = "mpi")]
 use std::collections::HashMap;
 
@@ -102,7 +100,6 @@ fn example_flat_triangle_grid_serial(n: usize) -> FlatTriangleGrid<f64> {
     create_flat_triangle_grid_data(&mut b, n);
     b.create_grid()
 }
-/*
 #[cfg(feature = "mpi")]
 fn create_single_element_grid_data(b: &mut SingleElementGridBuilder<3, f64>, n: usize) {
     for y in 0..n {
@@ -227,7 +224,7 @@ fn example_mixed_grid_serial(n: usize) -> MixedGrid<f64> {
     create_mixed_grid_data(&mut b, n);
     b.create_grid()
 }
-*/
+
 #[cfg(feature = "mpi")]
 fn test_parallel_flat_triangle_grid<C: Communicator>(comm: &C) {
     let rank = comm.rank();
@@ -364,7 +361,6 @@ fn test_parallel_assembly_flat_triangle_grid<C: Communicator>(
         });
     }
 }
-/*
 #[cfg(feature = "mpi")]
 fn test_parallel_assembly_single_element_grid<C: Communicator>(
     comm: &C,
@@ -375,7 +371,6 @@ fn test_parallel_assembly_single_element_grid<C: Communicator>(
     let size = comm.size();
 
     let n = 10;
-    let n = 3;
     let grid = example_single_element_grid(comm, n);
     let element = LagrangeElementFamily::<f64>::new(degree, cont);
     let space = ParallelFunctionSpace::new(&grid, &element);
@@ -384,40 +379,6 @@
 
     let matrix = a.parallel_assemble_singular_into_csr(&space, &space);
 
-    fn print_matrix(m: &CsrMatrix<f64>) {
-        let mut row = 0;
-        let mut col = 0;
-        println!("{:?}", m.shape());
-        for (i, j) in m.indices().iter().enumerate() {
-            while i >= m.indptr()[row + 1] {
-                for _ in col..m.shape()[1] {
-                    print!("0. ");
-                }
-                println!();
-                col = 0;
-                row += 1;
-            }
-            while col < *j {
-                print!("0. ");
-                col += 1;
-            }
-            print!("{:.5} ", m.data()[i]);
-            col += 1;
-        }
-        for _ in col..m.shape()[1] {
-            print!("0. ");
-        }
-        col = 0;
-        row += 1;
-        println!();
-        for _ in row..m.shape()[0] {
-            for _ in 0..m.shape()[1] {
-                print!("0. ");
-            }
-            println!();
-        }
-    }
-
     if rank == 0 {
         // Gather sparse matrices onto process 0
         let mut rows = vec![];
@@ -439,12 +400,7 @@ fn test_parallel_assembly_single_element_grid<C: Communicator>(
             let (indices, _status) = process.receive_vec::<usize>();
             let (indptr, _status) = process.receive_vec::<usize>();
             let (subdata, _status) = process.receive_vec::<f64>();
-            let mat = CsrMatrix::new(
-                matrix.shape(),
-                indices,
-                indptr,
-                subdata,
-            );
+            let mat = CsrMatrix::new(matrix.shape(), indices, indptr, subdata);
 
             let mut r = 0;
             for (i, index) in mat.indices().iter().enumerate() {
@@ -580,7 +536,7 @@ fn test_parallel_assembly_mixed_grid<C: Communicator>(comm: &C, degree: usize, cont: Continuity) {
         });
     }
 }
-*/
+
 #[cfg(feature = "mpi")]
 fn main() {
     let universe: Universe = mpi::initialize().unwrap();
@@ -596,7 +552,6 @@ fn main() {
             println!("Testing assembly with DP{degree} using FlatTriangleGrid in parallel.");
         }
         test_parallel_assembly_flat_triangle_grid(&world, degree, Continuity::Discontinuous);
-        /*
         if rank == 0 {
             println!("Testing assembly with DP{degree} using SingleElementGrid in parallel.");
         }
@@ -605,14 +560,12 @@ fn main() {
             println!("Testing assembly with DP{degree} using MixedGrid in parallel.");
         }
         test_parallel_assembly_mixed_grid(&world, degree, Continuity::Discontinuous);
-        */
     }
     for degree in 1..4 {
         if rank == 0 {
             println!("Testing assembly with P{degree} using FlatTriangleGrid in parallel.");
         }
         test_parallel_assembly_flat_triangle_grid(&world, degree, Continuity::Continuous);
-        /*
         if rank == 0 {
             println!("Testing assembly with P{degree} using SingleElementGrid in parallel.");
         }
@@ -621,7 +574,6 @@ fn main() {
             println!("Testing assembly with P{degree} using MixedGrid in parallel.");
         }
         test_parallel_assembly_mixed_grid(&world, degree, Continuity::Continuous);
-        */
     }
 }
 #[cfg(not(feature = "mpi"))]
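In the @@ -439,12 +400,7 @@ hunk above, rank 0 rebuilds each remote rank's CSR matrix from three received vectors. The matching send side falls outside the lines shown here; a sketch of what it presumably looks like, using only the rsmpi calls this example already imports (Destination::send, Communicator::process_at_rank), with details that may differ from the real test:

    // Every rank other than 0 ships its local CSR pieces to the root,
    // in the order the root receives them: indices, indptr, then data.
    if rank != 0 {
        let root = comm.process_at_rank(0);
        root.send(&matrix.indices()[..]);
        root.send(&matrix.indptr()[..]);
        root.send(&matrix.data()[..]);
    }

Rank 0 then reassembles each triple with CsrMatrix::new(matrix.shape(), indices, indptr, subdata), as the hunk shows.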
src/assembly/common.rs (6 changes: 3 additions & 3 deletions)
@@ -2,11 +2,11 @@
 use rlst::RlstScalar;
 
 /// Raw 2D data
-pub struct RawData2D<T: RlstScalar> {
+pub(crate) struct RawData2D<T: RlstScalar> {
     /// Array containting data
-    pub data: *mut T,
+    pub(crate) data: *mut T,
     /// Shape of data
-    pub shape: [usize; 2],
+    pub(crate) shape: [usize; 2],
 }
 
 unsafe impl<T: RlstScalar> Sync for RawData2D<T> {}
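
The whole point of this file's diff is visibility: a pub(crate) item is usable anywhere inside the bempp crate but vanishes from the public API, which is what the "use pub(crate) for internal functions" commits are after. A minimal illustration of the rule, with made-up module and type names:

    mod assembly {
        // Visible throughout the defining crate, invisible to dependents.
        pub(crate) struct RawBuffer {
            pub(crate) len: usize,
        }
    }

    fn same_crate_ok() -> usize {
        // Fine: this code lives in the same crate as `assembly`.
        let b = assembly::RawBuffer { len: 3 };
        b.len
    }

From a downstream crate, naming the struct (e.g. bempp::assembly::common::RawData2D) is now a privacy error.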