diff --git a/examples/test_parallel_grid.rs b/examples/test_parallel_grid.rs index 4097829f..268d70db 100644 --- a/examples/test_parallel_grid.rs +++ b/examples/test_parallel_grid.rs @@ -10,16 +10,15 @@ use bempp::{ function::{ParallelFunctionSpace, SerialFunctionSpace}, grid::{ flat_triangle_grid::{FlatTriangleGrid, FlatTriangleGridBuilder}, - //mixed_grid::{MixedGrid, MixedGridBuilder}, + mixed_grid::{MixedGrid, MixedGridBuilder}, parallel_grid::ParallelGrid, - //single_element_grid::{SingleElementGrid, SingleElementGridBuilder}, + single_element_grid::{SingleElementGrid, SingleElementGridBuilder}, }, traits::{ element::Continuity, function::FunctionSpace, grid::{Builder, CellType, GeometryType, GridType, ParallelBuilder, PointType}, - types::Ownership, - // types::{Ownership, ReferenceCellType}, + types::{Ownership, ReferenceCellType}, }, }; #[cfg(feature = "mpi")] @@ -29,8 +28,7 @@ use mpi::{ traits::{Communicator, Destination, Source}, }; #[cfg(feature = "mpi")] -use rlst::CsrMatrix; -// use rlst::{CsrMatrix, Shape}; +use rlst::{CsrMatrix, Shape}; #[cfg(feature = "mpi")] use std::collections::HashMap; @@ -102,7 +100,6 @@ fn example_flat_triangle_grid_serial(n: usize) -> FlatTriangleGrid { create_flat_triangle_grid_data(&mut b, n); b.create_grid() } -/* #[cfg(feature = "mpi")] fn create_single_element_grid_data(b: &mut SingleElementGridBuilder<3, f64>, n: usize) { for y in 0..n { @@ -227,7 +224,7 @@ fn example_mixed_grid_serial(n: usize) -> MixedGrid { create_mixed_grid_data(&mut b, n); b.create_grid() } -*/ + #[cfg(feature = "mpi")] fn test_parallel_flat_triangle_grid(comm: &C) { let rank = comm.rank(); @@ -364,7 +361,6 @@ fn test_parallel_assembly_flat_triangle_grid( }); } } -/* #[cfg(feature = "mpi")] fn test_parallel_assembly_single_element_grid( comm: &C, @@ -375,7 +371,6 @@ fn test_parallel_assembly_single_element_grid( let size = comm.size(); let n = 10; - let n = 3; let grid = example_single_element_grid(comm, n); let element = LagrangeElementFamily::::new(degree, cont); let space = ParallelFunctionSpace::new(&grid, &element); @@ -384,40 +379,6 @@ fn test_parallel_assembly_single_element_grid( let matrix = a.parallel_assemble_singular_into_csr(&space, &space); - fn print_matrix(m: &CsrMatrix) { - let mut row = 0; - let mut col = 0; - println!("{:?}", m.shape()); - for (i, j) in m.indices().iter().enumerate() { - while i >= m.indptr()[row + 1] { - for _ in col..m.shape()[1] { - print!("0. "); - } - println!(); - col = 0; - row += 1; - } - while col < *j { - print!("0. "); - col += 1; - } - print!("{:.5} ", m.data()[i]); - col += 1; - } - for _ in col..m.shape()[1] { - print!("0. "); - } - col = 0; - row += 1; - println!(); - for _ in row..m.shape()[0] { - for _ in 0..m.shape()[1] { - print!("0. 
"); - } - println!(); - } - } - if rank == 0 { // Gather sparse matrices onto process 0 let mut rows = vec![]; @@ -439,12 +400,7 @@ fn test_parallel_assembly_single_element_grid( let (indices, _status) = process.receive_vec::(); let (indptr, _status) = process.receive_vec::(); let (subdata, _status) = process.receive_vec::(); - let mat = CsrMatrix::new( - matrix.shape(), - indices, - indptr, - subdata, - ); + let mat = CsrMatrix::new(matrix.shape(), indices, indptr, subdata); let mut r = 0; for (i, index) in mat.indices().iter().enumerate() { @@ -580,7 +536,7 @@ fn test_parallel_assembly_mixed_grid(comm: &C, degree: usize, c }); } } -*/ + #[cfg(feature = "mpi")] fn main() { let universe: Universe = mpi::initialize().unwrap(); @@ -596,7 +552,6 @@ fn main() { println!("Testing assembly with DP{degree} using FlatTriangleGrid in parallel."); } test_parallel_assembly_flat_triangle_grid(&world, degree, Continuity::Discontinuous); - /* if rank == 0 { println!("Testing assembly with DP{degree} using SingleElementGrid in parallel."); } @@ -605,14 +560,12 @@ fn main() { println!("Testing assembly with DP{degree} using MixedGrid in parallel."); } test_parallel_assembly_mixed_grid(&world, degree, Continuity::Discontinuous); - */ } for degree in 1..4 { if rank == 0 { println!("Testing assembly with P{degree} using FlatTriangleGrid in parallel."); } test_parallel_assembly_flat_triangle_grid(&world, degree, Continuity::Continuous); - /* if rank == 0 { println!("Testing assembly with P{degree} using SingleElementGrid in parallel."); } @@ -621,7 +574,6 @@ fn main() { println!("Testing assembly with P{degree} using MixedGrid in parallel."); } test_parallel_assembly_mixed_grid(&world, degree, Continuity::Continuous); - */ } } #[cfg(not(feature = "mpi"))] diff --git a/src/assembly/common.rs b/src/assembly/common.rs index 76bb208f..fdaf4dac 100644 --- a/src/assembly/common.rs +++ b/src/assembly/common.rs @@ -2,11 +2,11 @@ use rlst::RlstScalar; /// Raw 2D data -pub struct RawData2D { +pub(crate) struct RawData2D { /// Array containting data - pub data: *mut T, + pub(crate) data: *mut T, /// Shape of data - pub shape: [usize; 2], + pub(crate) shape: [usize; 2], } unsafe impl Sync for RawData2D {} diff --git a/src/grid/common.rs b/src/grid/common.rs index 1ec9ef69..3c4b11c9 100644 --- a/src/grid/common.rs +++ b/src/grid/common.rs @@ -5,122 +5,24 @@ use num::Float; use rlst::RlstScalar; use rlst::{Array, Shape, UnsafeRandomAccessByRef, UnsafeRandomAccessByValue}; -/// Compute a physical point -pub fn compute_point< - T: Float + RlstScalar, - Table: UnsafeRandomAccessByRef<4, Item = T> + Shape<4>, ->( - geometry: &impl Geometry, - table: Table, - cell_index: usize, - point_index: usize, - point: &mut [T], -) { - assert_eq!(geometry.dim(), point.len()); - - let cell = geometry.index_map()[cell_index]; - - for component in point.iter_mut() { - *component = T::from(0.0).unwrap(); - } - for (i, v) in geometry.cell_points(cell).unwrap().iter().enumerate() { - let t = unsafe { *table.get_unchecked([0, point_index, i, 0]) }; - for (j, component) in point.iter_mut().enumerate() { - *component += *geometry.coordinate(*v, j).unwrap() * t; - } - } -} - -/// Compute a Jacobian -pub fn compute_jacobian< - T: Float + RlstScalar, - Table: UnsafeRandomAccessByRef<4, Item = T> + Shape<4>, ->( - geometry: &impl Geometry, - table: Table, - tdim: usize, - cell_index: usize, - point_index: usize, - jacobian: &mut [T], -) { - let gdim = geometry.dim(); - assert_eq!(jacobian.len(), gdim * tdim); - - let cell = 
geometry.index_map()[cell_index]; - - for component in jacobian.iter_mut() { - *component = T::from(0.0).unwrap(); - } - for (i, v) in geometry.cell_points(cell).unwrap().iter().enumerate() { - for gd in 0..gdim { - for td in 0..tdim { - jacobian[td * gdim + gd] += *geometry.coordinate(*v, gd).unwrap() - * unsafe { *table.get_unchecked([1 + td, point_index, i, 0]) }; - } - } - } -} - -/// Compute a normal from a Jacobian of a cell with topological dimension 2 and geometric dimension 3 -pub fn compute_normal_from_jacobian23>( - jacobian: &[T], - normal: &mut [T], -) { - assert_eq!(jacobian.len(), 6); - assert_eq!(normal.len(), 3); - - for (i, j, k) in [(0, 1, 2), (1, 2, 0), (2, 0, 1)] { - normal[i] = jacobian[j] * jacobian[3 + k] - jacobian[k] * jacobian[3 + j]; - } - let size = RlstScalar::sqrt(normal.iter().map(|&x| RlstScalar::powi(x, 2)).sum::()); - for i in normal.iter_mut() { - *i /= size; - } -} - -/// Compute a normal from a Jacobian -pub fn compute_normal_from_jacobian>( - jacobian: &[T], - normal: &mut [T], - tdim: usize, - gdim: usize, -) { - assert_eq!(jacobian.len(), tdim * gdim); - assert_eq!(normal.len(), gdim); - - match tdim { - 2 => match gdim { - 3 => compute_normal_from_jacobian23(jacobian, normal), - _ => { - unimplemented!("compute_normal_from_jacobian() not implemented for topological dimension {tdim} and geometric dimension: {gdim}"); - } - }, - _ => { - unimplemented!( - "compute_normal_from_jacobian() not implemented for topological dimension {tdim}" - ); - } - } -} - /// Compute the determinant of a 1 by 1 matrix -pub fn compute_det11>(jacobian: &[T]) -> T { +pub(crate) fn compute_det11>(jacobian: &[T]) -> T { T::abs(jacobian[0]) } /// Compute the determinant of a 1 by 2 matrix -pub fn compute_det12>(jacobian: &[T]) -> T { +pub(crate) fn compute_det12>(jacobian: &[T]) -> T { T::sqrt(jacobian.iter().map(|x| x.powi(2)).sum()) } /// Compute the determinant of a 1 by 3 matrix -pub fn compute_det13>(jacobian: &[T]) -> T { +pub(crate) fn compute_det13>(jacobian: &[T]) -> T { T::sqrt(jacobian.iter().map(|x| x.powi(2)).sum()) } /// Compute the determinant of a 2 by 2 matrix -pub fn compute_det22>(jacobian: &[T]) -> T { +pub(crate) fn compute_det22>(jacobian: &[T]) -> T { T::abs(jacobian[0] * jacobian[3] - jacobian[1] * jacobian[2]) } /// Compute the determinant of a 2 by 3 matrix -pub fn compute_det23>(jacobian: &[T]) -> T { +pub(crate) fn compute_det23>(jacobian: &[T]) -> T { T::sqrt( [(1, 2), (2, 0), (0, 1)] .iter() @@ -131,7 +33,7 @@ pub fn compute_det23>(jacobian: &[T]) -> T { ) } /// Compute the determinant of a 3 by 3 matrix -pub fn compute_det33>(jacobian: &[T]) -> T { +pub(crate) fn compute_det33>(jacobian: &[T]) -> T { T::abs( [(0, 1, 2), (1, 2, 0), (2, 0, 1)] .iter() @@ -144,7 +46,7 @@ pub fn compute_det33>(jacobian: &[T]) -> T { } /// Compute the determinant of a matrix -pub fn compute_det>(jacobian: &[T], tdim: usize, gdim: usize) -> T { +pub(crate) fn compute_det>(jacobian: &[T], tdim: usize, gdim: usize) -> T { assert_eq!(jacobian.len(), tdim * gdim); match tdim { 1 => match gdim { @@ -175,7 +77,7 @@ pub fn compute_det>(jacobian: &[T], tdim: usize, gdim: u } /// Compute physical points -pub fn compute_points< +pub(crate) fn compute_points< T: Float + RlstScalar, Table: UnsafeRandomAccessByRef<4, Item = T> + Shape<4>, >( @@ -204,7 +106,7 @@ pub fn compute_points< } /// Compute Jacobians -pub fn compute_jacobians< +pub(crate) fn compute_jacobians< T: Float + RlstScalar, Table: UnsafeRandomAccessByRef<4, Item = T> + Shape<4>, >( @@ -237,7 +139,7 @@ pub fn 
compute_jacobians< } /// Compute normals from a Jacobians of a cell with topological dimension 2 and geometric dimension 3 -pub fn compute_normals_from_jacobians23>( +pub(crate) fn compute_normals_from_jacobians23>( jacobians: &[T], normals: &mut [T], ) { @@ -262,54 +164,28 @@ pub fn compute_normals_from_jacobians23>( } } -/// Compute normals from Jacobians -pub fn compute_normals_from_jacobians>( - jacobians: &[T], - normals: &mut [T], - tdim: usize, - gdim: usize, -) { - let npts = normals.len() / gdim; - assert_eq!(jacobians.len(), tdim * gdim * npts); - assert_eq!(normals.len(), gdim * npts); - - match tdim { - 2 => match gdim { - 3 => compute_normals_from_jacobians23(jacobians, normals), - _ => { - unimplemented!("compute_normals_from_jacobians() not implemented for topological dimension {tdim} and geometric dimension: {gdim}"); - } - }, - _ => { - unimplemented!( - "compute_normals_from_jacobians() not implemented for topological dimension {tdim}" - ); - } - } -} - /// Compute determinants of 1 by 1 matrices -pub fn compute_dets11>(jacobian: &[T], jdets: &mut [T]) { +pub(crate) fn compute_dets11>(jacobian: &[T], jdets: &mut [T]) { for (i, jdet) in jdets.iter_mut().enumerate() { *jdet = T::abs(jacobian[i]); } } /// Compute determinants of 1 by 2 matrices -pub fn compute_dets12>(jacobian: &[T], jdets: &mut [T]) { +pub(crate) fn compute_dets12>(jacobian: &[T], jdets: &mut [T]) { let npts = jdets.len(); for (i, jdet) in jdets.iter_mut().enumerate() { *jdet = T::sqrt((0..2).map(|j| jacobian[j * npts + i].powi(2)).sum()); } } /// Compute determinants of 1 by 3 matrices -pub fn compute_dets13>(jacobian: &[T], jdets: &mut [T]) { +pub(crate) fn compute_dets13>(jacobian: &[T], jdets: &mut [T]) { let npts = jdets.len(); for (i, jdet) in jdets.iter_mut().enumerate() { *jdet = T::sqrt((0..3).map(|j| jacobian[j * npts + i].powi(2)).sum()); } } /// Compute determinants of 2 by 2 matrices -pub fn compute_dets22>(jacobian: &[T], jdets: &mut [T]) { +pub(crate) fn compute_dets22>(jacobian: &[T], jdets: &mut [T]) { let npts = jdets.len(); for (i, jdet) in jdets.iter_mut().enumerate() { *jdet = T::abs( @@ -318,7 +194,7 @@ pub fn compute_dets22>(jacobian: &[T], jdets: &mut [T]) } } /// Compute determinants of 2 by 3 matrices -pub fn compute_dets23>(jacobian: &[T], jdets: &mut [T]) { +pub(crate) fn compute_dets23>(jacobian: &[T], jdets: &mut [T]) { let npts = jdets.len(); for (i, jdet) in jdets.iter_mut().enumerate() { *jdet = T::sqrt( @@ -334,7 +210,7 @@ pub fn compute_dets23>(jacobian: &[T], jdets: &mut [T]) } } /// Compute determinants of 3 by 3 matrices -pub fn compute_dets33>(jacobian: &[T], jdets: &mut [T]) { +pub(crate) fn compute_dets33>(jacobian: &[T], jdets: &mut [T]) { let npts = jdets.len(); for (p_i, jdet) in jdets.iter_mut().enumerate() { *jdet = T::abs( @@ -351,7 +227,7 @@ pub fn compute_dets33>(jacobian: &[T], jdets: &mut [T]) } /// Compute determinants of matrices -pub fn compute_dets>( +pub(crate) fn compute_dets>( jacobians: &[T], tdim: usize, gdim: usize, @@ -388,7 +264,7 @@ pub fn compute_dets>( } /// Compute the diameter of a triangle -pub fn compute_diameter_triangle< +pub(crate) fn compute_diameter_triangle< T: Float + Float + RlstScalar, ArrayImpl: UnsafeRandomAccessByValue<1, Item = T> + Shape<1>, >( @@ -403,7 +279,7 @@ pub fn compute_diameter_triangle< } /// Compute the diameter of a quadrilateral -pub fn compute_diameter_quadrilateral< +pub(crate) fn compute_diameter_quadrilateral< T: Float + RlstScalar, ArrayImpl: UnsafeRandomAccessByValue<1, Item = T> + Shape<1>, >( diff --git 
a/src/grid/flat_triangle_grid/grid.rs b/src/grid/flat_triangle_grid/grid.rs index 33d939c7..cc54352b 100644 --- a/src/grid/flat_triangle_grid/grid.rs +++ b/src/grid/flat_triangle_grid/grid.rs @@ -37,9 +37,11 @@ pub struct FlatTriangleGrid> { entities_to_cells: Vec>>>, entity_types: Vec, - // Point and cell ids + // Point, edge and cell ids point_indices_to_ids: Vec, point_ids_to_indices: HashMap, + edge_indices_to_ids: Vec, + edge_ids_to_indices: HashMap, cell_indices_to_ids: Vec, cell_ids_to_indices: HashMap, } @@ -129,8 +131,6 @@ where let mut entities_to_cells = vec![vec![]; 3]; let mut entities_to_vertices = vec![vec![]; 2]; - let mut edge_indices = HashMap::new(); - entities_to_cells[2] = vec![vec![]; ncells]; entities_to_vertices[0] = (0..nvertices).map(|i| vec![i]).collect::>(); entities_to_cells[0] = vec![vec![]; nvertices]; @@ -145,10 +145,22 @@ where cells_to_entities[0][cell_i].extend_from_slice(cell); cells_to_entities[2][cell_i] = vec![cell_i]; } - if let Some(e) = edge_ids { - for (i, j) in e.into_iter() { - edge_indices.insert((i[0], i[1]), j); - entities_to_vertices[1].push(vec![i[0], i[1]]); + + let mut edge_indices = HashMap::new(); + let mut edge_indices_to_ids = vec![]; + let mut edge_ids_to_indices = HashMap::new(); + if let Some(e) = &edge_ids { + for (edge_i, (i, j)) in e.iter().enumerate() { + let mut v0 = point_ids_to_indices[&i[0]]; + let mut v1 = point_ids_to_indices[&i[1]]; + if v0 > v1 { + std::mem::swap(&mut v0, &mut v1); + } + + edge_indices.insert((v0, v1), edge_i); + edge_indices_to_ids.push(*j); + edge_ids_to_indices.insert(*j, edge_i); + entities_to_vertices[1].push(vec![v0, v1]); entities_to_cells[1].push(vec![]); } } @@ -166,7 +178,13 @@ where entities_to_cells[1][*edge_index] .push(CellLocalIndexPair::new(cell_i, local_index)); } else { - edge_indices.insert((first, second), entities_to_vertices[1].len()); + if edge_ids.is_some() { + panic!("Missing id for edge"); + } + let id = entities_to_vertices[1].len(); + edge_indices.insert((first, second), id); + edge_indices_to_ids.push(id); + edge_ids_to_indices.insert(id, id); cells_to_entities[1][cell_i].push(entities_to_vertices[1].len()); entities_to_cells[1].push(vec![CellLocalIndexPair::new(cell_i, local_index)]); entities_to_vertices[1].push(vec![first, second]); @@ -189,6 +207,8 @@ where entity_types, point_indices_to_ids, point_ids_to_indices, + edge_indices_to_ids, + edge_ids_to_indices, cell_indices_to_ids, cell_ids_to_indices, } @@ -451,6 +471,12 @@ impl> Topology for FlatTriangleGrid { fn vertex_id_to_index(&self, id: usize) -> usize { self.point_ids_to_indices[&id] } + fn edge_id_to_index(&self, id: usize) -> usize { + self.edge_ids_to_indices[&id] + } + fn edge_index_to_id(&self, index: usize) -> usize { + self.edge_indices_to_ids[index] + } fn cell_id_to_index(&self, id: usize) -> usize { self.cell_ids_to_indices[&id] } diff --git a/src/grid/flat_triangle_grid/parallel.rs b/src/grid/flat_triangle_grid/parallel.rs index fe89ac89..f74677f6 100644 --- a/src/grid/flat_triangle_grid/parallel.rs +++ b/src/grid/flat_triangle_grid/parallel.rs @@ -1,387 +1,63 @@ //! 
Parallel grid builder -use crate::element::reference_cell; use crate::grid::flat_triangle_grid::{FlatTriangleGrid, FlatTriangleGridBuilder}; -use crate::grid::parallel_grid::ParallelGrid; -use crate::traits::grid::ParallelBuilder; -use crate::traits::types::{Ownership, ReferenceCellType}; -use mpi::{ - request::WaitGuard, - topology::Communicator, - traits::{Buffer, Destination, Equivalence, Source}, -}; +use crate::grid::parallel_grid::ParallelGridBuilder; +use crate::traits::types::ReferenceCellType; +use mpi::traits::{Buffer, Equivalence}; use num::Float; use rlst::{ - dense::array::views::ArrayViewMut, rlst_dynamic_array2, Array, BaseArray, MatrixInverse, - RandomAccessMut, RlstScalar, VectorContainer, + dense::array::views::ArrayViewMut, Array, BaseArray, MatrixInverse, RlstScalar, VectorContainer, }; use std::collections::HashMap; -impl + Equivalence> ParallelBuilder<3> +impl + Equivalence> ParallelGridBuilder for FlatTriangleGridBuilder where for<'a> Array, 2>, 2>, 2>: MatrixInverse, [T]: Buffer, { - type ParallelGridType<'a, C: Communicator + 'a> = ParallelGrid<'a, C, FlatTriangleGrid>; - fn create_parallel_grid<'a, C: Communicator>( - self, - comm: &'a C, - cell_owners: &HashMap, - ) -> ParallelGrid<'a, C, FlatTriangleGrid> { - let rank = comm.rank() as usize; - let size = comm.size() as usize; - - let npts = self.point_indices_to_ids.len(); - let ncells = self.cell_indices_to_ids.len(); - - // data used in computation - let mut vertex_owners = vec![(-1, 0); npts]; - let mut vertex_counts = vec![0; size]; - let mut cell_indices_per_proc = vec![vec![]; size]; - let mut vertex_indices_per_proc = vec![vec![]; size]; - let mut edge_owners = HashMap::new(); - let mut edge_counts = vec![0; size]; - let mut edges_per_proc = vec![vec![]; size]; - - // data to send to other processes - let mut points_per_proc = vec![vec![]; size]; - let mut cells_per_proc = vec![vec![]; size]; - let mut point_ids_per_proc = vec![vec![]; size]; - let mut cell_ids_per_proc = vec![vec![]; size]; - let mut vertex_owners_per_proc = vec![vec![]; size]; - let mut vertex_local_indices_per_proc = vec![vec![]; size]; - let mut edge_vertices0_per_proc = vec![vec![]; size]; - let mut edge_vertices1_per_proc = vec![vec![]; size]; - let mut edge_owners_per_proc = vec![vec![]; size]; - let mut edge_local_indices_per_proc = vec![vec![]; size]; - let mut cell_owners_per_proc = vec![vec![]; size]; - let mut cell_local_indices_per_proc = vec![vec![]; size]; - - for (index, id) in self.cell_indices_to_ids.iter().enumerate() { - let owner = cell_owners[id]; - for v in &self.cells[3 * index..3 * (index + 1)] { - if vertex_owners[*v].0 == -1 { - vertex_owners[*v] = (owner as i32, vertex_counts[owner]); - } - if !vertex_indices_per_proc[owner].contains(v) { - vertex_indices_per_proc[owner].push(*v); - vertex_owners_per_proc[owner].push(vertex_owners[*v].0 as usize); - vertex_local_indices_per_proc[owner].push(vertex_owners[*v].1); - for i in 0..3 { - points_per_proc[owner].push(self.points[v * 3 + i]) - } - point_ids_per_proc[owner].push(self.point_indices_to_ids[*v]); - vertex_counts[owner] += 1; - } - } - } - - let ref_conn = &reference_cell::connectivity(ReferenceCellType::Triangle)[1]; - - for (index, id) in self.cell_indices_to_ids.iter().enumerate() { - let owner = cell_owners[id]; - for e in ref_conn { - let v0 = self.cells[3 * index + e[0][0]]; - let v1 = self.cells[3 * index + e[0][1]]; - let edge = if v0 < v1 { [v0, v1] } else { [v1, v0] }; - let local_v0 = vertex_indices_per_proc[owner] - .iter() - .position(|&r| r == 
v0) - .unwrap(); - let local_v1 = vertex_indices_per_proc[owner] - .iter() - .position(|&r| r == v1) - .unwrap(); - let local_edge = if local_v0 < local_v1 { - [local_v0, local_v1] - } else { - [local_v1, local_v0] - }; - if edge_owners.get_mut(&edge).is_none() { - edge_owners.insert(edge, (owner, edge_counts[owner])); - edges_per_proc[owner].push(edge); - edge_vertices0_per_proc[owner].push(local_edge[0]); - edge_vertices1_per_proc[owner].push(local_edge[1]); - edge_owners_per_proc[owner].push(edge_owners[&edge].0); - edge_local_indices_per_proc[owner].push(edge_owners[&edge].1); - edge_counts[owner] += 1; - } - } - } - - for index in 0..ncells { - for p in 0..size { - for v in &self.cells[3 * index..3 * (index + 1)] { - if vertex_indices_per_proc[p].contains(v) { - cell_indices_per_proc[p].push(index); - break; - } - } - } - } + type G = FlatTriangleGrid; + type ExtraCellInfo = (); - for p in 0..size { - for index in &cell_indices_per_proc[p] { - let id = self.cell_indices_to_ids[*index]; - for v in &self.cells[3 * index..3 * (index + 1)] { - if !vertex_indices_per_proc[p].contains(v) { - vertex_indices_per_proc[p].push(*v); - vertex_owners_per_proc[p].push(vertex_owners[*v].0 as usize); - vertex_local_indices_per_proc[p].push(vertex_owners[*v].1); - for i in 0..3 { - points_per_proc[p].push(self.points[v * 3 + i]) - } - point_ids_per_proc[p].push(self.point_indices_to_ids[*v]) - } - cells_per_proc[p].push( - vertex_indices_per_proc[p] - .iter() - .position(|&r| r == *v) - .unwrap(), - ); - } + fn new_extra_cell_info(&self) {} - for e in ref_conn { - let v0 = self.cells[3 * index + e[0][0]]; - let v1 = self.cells[3 * index + e[0][1]]; - let edge = if v0 < v1 { [v0, v1] } else { [v1, v0] }; - let local_v0 = vertex_indices_per_proc[p] - .iter() - .position(|&r| r == v0) - .unwrap(); - let local_v1 = vertex_indices_per_proc[p] - .iter() - .position(|&r| r == v1) - .unwrap(); - let local_edge = if local_v0 < local_v1 { - [local_v0, local_v1] - } else { - [local_v1, local_v0] - }; - if !edges_per_proc[p].contains(&edge) { - edges_per_proc[p].push(edge); - edge_vertices0_per_proc[p].push(local_edge[0]); - edge_vertices1_per_proc[p].push(local_edge[1]); - edge_owners_per_proc[p].push(edge_owners[&edge].0); - edge_local_indices_per_proc[p].push(edge_owners[&edge].1); - } - } - - cell_ids_per_proc[p].push(id); - cell_owners_per_proc[p].push(cell_owners[&id]); - cell_local_indices_per_proc[p].push( - cell_indices_per_proc[cell_owners[&id]] - .iter() - .position(|&r| r == *index) - .unwrap(), - ); - } - } - - mpi::request::scope(|scope| { - for p in 1..size { - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &points_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &cells_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &point_ids_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &vertex_owners_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &vertex_local_indices_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &cell_ids_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &cell_owners_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &cell_local_indices_per_proc[p]), - ); - let _ = 
WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &edge_owners_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &edge_local_indices_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &edge_vertices0_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &edge_vertices1_per_proc[p]), - ); - } - }); - self.create_internal( - comm, - &points_per_proc[rank], - &cells_per_proc[rank], - &point_ids_per_proc[rank], - &vertex_owners_per_proc[rank], - &vertex_local_indices_per_proc[rank], - &cell_ids_per_proc[rank], - &cell_owners_per_proc[rank], - &cell_local_indices_per_proc[rank], - &edge_owners_per_proc[rank], - &edge_local_indices_per_proc[rank], - &edge_vertices0_per_proc[rank], - &edge_vertices1_per_proc[rank], - ) + fn point_indices_to_ids(&self) -> &[usize] { + &self.point_indices_to_ids } - fn receive_parallel_grid( - self, - comm: &C, - root_rank: usize, - ) -> ParallelGrid<'_, C, FlatTriangleGrid> { - let root_process = comm.process_at_rank(root_rank as i32); - - let (points, _status) = root_process.receive_vec::(); - let (cells, _status) = root_process.receive_vec::(); - let (point_ids, _status) = root_process.receive_vec::(); - let (vertex_owners, _status) = root_process.receive_vec::(); - let (vertex_local_indices, _status) = root_process.receive_vec::(); - let (cell_ids, _status) = root_process.receive_vec::(); - let (cell_owners, _status) = root_process.receive_vec::(); - let (cell_local_indices, _status) = root_process.receive_vec::(); - let (edge_owners, _status) = root_process.receive_vec::(); - let (edge_local_indices, _status) = root_process.receive_vec::(); - let (edge_vertices0, _status) = root_process.receive_vec::(); - let (edge_vertices1, _status) = root_process.receive_vec::(); - self.create_internal( - comm, - &points, - &cells, - &point_ids, - &vertex_owners, - &vertex_local_indices, - &cell_ids, - &cell_owners, - &cell_local_indices, - &edge_owners, - &edge_local_indices, - &edge_vertices0, - &edge_vertices1, - ) + fn points(&self) -> &[T] { + &self.points } -} - -impl> FlatTriangleGridBuilder -where - for<'a> Array, 2>, 2>, 2>: MatrixInverse, -{ - #[allow(clippy::too_many_arguments)] - fn create_internal<'a, C: Communicator>( - self, - comm: &'a C, - points: &[T], + fn cell_indices_to_ids(&self) -> &[usize] { + &self.cell_indices_to_ids + } + fn cell_points(&self, index: usize) -> &[usize] { + &self.cells[3 * index..3 * (index + 1)] + } + fn cell_vertices(&self, index: usize) -> &[usize] { + self.cell_points(index) + } + fn cell_type(&self, _index: usize) -> ReferenceCellType { + ReferenceCellType::Triangle + } + fn create_serial_grid( + &self, + points: Array, 2>, 2>, cells: &[usize], - point_ids: &[usize], - vertex_owners: &[usize], - vertex_local_indices: &[usize], - cell_ids: &[usize], - cell_owners: &[usize], - cell_local_indices: &[usize], - edge_owners: &[usize], - edge_local_indices: &[usize], - edge_vertices0: &[usize], - edge_vertices1: &[usize], - ) -> ParallelGrid<'a, C, FlatTriangleGrid> { - let rank = comm.rank() as usize; - let npts = point_ids.len(); - let ncells = cell_ids.len(); - - let mut coordinates = rlst_dynamic_array2!(T, [npts, 3]); - for i in 0..npts { - for j in 0..3 { - *coordinates.get_mut([i, j]).unwrap() = points[i * 3 + j]; - } - } - - let mut point_ids_to_indices = HashMap::new(); - for (index, id) in point_ids.iter().enumerate() { - 
point_ids_to_indices.insert(*id, index); - } - let mut cell_ids_to_indices = HashMap::new(); - for (index, id) in cell_ids.iter().enumerate() { - cell_ids_to_indices.insert(*id, index); - } - - let mut cell_ownership = HashMap::new(); - for index in 0..ncells { - cell_ownership.insert( - index, - if cell_owners[index] == rank { - Ownership::Owned - } else { - Ownership::Ghost(cell_owners[index], cell_local_indices[index]) - }, - ); - } - let mut vertex_ownership = HashMap::new(); - for index in 0..npts { - vertex_ownership.insert( - index, - if vertex_owners[index] == rank { - Ownership::Owned - } else { - Ownership::Ghost(vertex_owners[index], vertex_local_indices[index]) - }, - ); - } - - let nedges = edge_owners.len(); - let mut edge_ownership = HashMap::new(); - for index in 0..nedges { - edge_ownership.insert( - index, - if edge_owners[index] == rank { - Ownership::Owned - } else { - Ownership::Ghost(edge_owners[index], edge_local_indices[index]) - }, - ); - } - - let mut edge_ids = HashMap::new(); - for (n, (i, j)) in edge_vertices0.iter().zip(edge_vertices1).enumerate() { - edge_ids.insert([*i, *j], n); - } - - let serial_grid = FlatTriangleGrid::new( - coordinates, + point_indices_to_ids: Vec, + point_ids_to_indices: HashMap, + cell_indices_to_ids: Vec, + cell_ids_to_indices: HashMap, + edge_ids: HashMap<[usize; 2], usize>, + _extra_cell_info: &(), + ) -> Self::G { + FlatTriangleGrid::new( + points, cells, - point_ids.to_vec(), + point_indices_to_ids, point_ids_to_indices, - cell_ids.to_vec(), + cell_indices_to_ids, cell_ids_to_indices, Some(edge_ids), - ); - - ParallelGrid::new( - comm, - serial_grid, - vertex_ownership, - edge_ownership, - cell_ownership, ) } } diff --git a/src/grid/mixed_grid.rs b/src/grid/mixed_grid.rs index 2d32b345..cdd0b469 100644 --- a/src/grid/mixed_grid.rs +++ b/src/grid/mixed_grid.rs @@ -6,8 +6,8 @@ mod builder; mod geometry; mod grid; mod io; -//#[cfg(feature = "mpi")] -//mod parallel; +#[cfg(feature = "mpi")] +mod parallel; mod topology; pub use self::builder::MixedGridBuilder; diff --git a/src/grid/mixed_grid/builder.rs b/src/grid/mixed_grid/builder.rs index 066965c8..b27532a4 100644 --- a/src/grid/mixed_grid/builder.rs +++ b/src/grid/mixed_grid/builder.rs @@ -111,6 +111,7 @@ where self.point_indices_to_ids, self.point_ids_to_indices, self.cell_indices_to_ids, + None, ) } } diff --git a/src/grid/mixed_grid/grid.rs b/src/grid/mixed_grid/grid.rs index aa070fdb..d94b44d9 100644 --- a/src/grid/mixed_grid/grid.rs +++ b/src/grid/mixed_grid/grid.rs @@ -35,6 +35,7 @@ where point_indices_to_ids: Vec, point_ids_to_indices: HashMap, cell_indices_to_ids: Vec, + edge_ids: Option>, ) -> Self { let mut element_info = vec![]; let mut element_numbers = vec![]; @@ -71,6 +72,7 @@ where cell_types, &point_indices_to_ids, &cell_indices_to_ids, + edge_ids, ); let geometry = MixedGeometry::::new( points, diff --git a/src/grid/mixed_grid/parallel.rs b/src/grid/mixed_grid/parallel.rs index 0941c42f..0feff52b 100644 --- a/src/grid/mixed_grid/parallel.rs +++ b/src/grid/mixed_grid/parallel.rs @@ -1,297 +1,125 @@ //! 
Parallel grid builder +use crate::element::reference_cell; use crate::grid::mixed_grid::{MixedGrid, MixedGridBuilder}; -use crate::grid::parallel_grid::ParallelGrid; -use crate::traits::grid::ParallelBuilder; -use crate::traits::types::{Ownership, ReferenceCellType}; +use crate::grid::parallel_grid::ParallelGridBuilder; +use crate::traits::types::ReferenceCellType; use mpi::{ - request::WaitGuard, - topology::Communicator, + request::{LocalScope, WaitGuard}, + topology::Process, traits::{Buffer, Destination, Equivalence, Source}, }; use num::Float; use rlst::{ - dense::array::views::ArrayViewMut, rlst_dynamic_array2, Array, BaseArray, MatrixInverse, - RandomAccessMut, RlstScalar, VectorContainer, + dense::array::views::ArrayViewMut, Array, BaseArray, MatrixInverse, RlstScalar, VectorContainer, }; use std::collections::HashMap; -impl + Equivalence> ParallelBuilder - for MixedGridBuilder +impl + Equivalence> ParallelGridBuilder for MixedGridBuilder<3, T> where for<'a> Array, 2>, 2>, 2>: MatrixInverse, [T]: Buffer, { - type ParallelGridType<'a, C: Communicator + 'a> = ParallelGrid<'a, C, MixedGrid>; - fn create_parallel_grid<'a, C: Communicator>( - self, - comm: &'a C, - cell_owners: &HashMap, - ) -> ParallelGrid<'a, C, MixedGrid> { - let rank = comm.rank() as usize; - let size = comm.size() as usize; + type G = MixedGrid; + type ExtraCellInfo = (Vec, Vec); - let npts = self.point_indices_to_ids.len(); - let ncells = self.cell_indices_to_ids.len(); - - // data used in computation - let mut vertex_owners = vec![(-1, 0); npts]; - let mut vertex_counts = vec![0; size]; - let mut cell_indices_per_proc = vec![vec![]; size]; - let mut vertex_indices_per_proc = vec![vec![]; size]; - - // data to send to other processes - let mut points_per_proc = vec![vec![]; size]; - let mut cells_per_proc = vec![vec![]; size]; - let mut cell_types_per_proc = vec![vec![]; size]; - let mut cell_degrees_per_proc = vec![vec![]; size]; - let mut point_ids_per_proc = vec![vec![]; size]; - let mut cell_ids_per_proc = vec![vec![]; size]; - let mut vertex_owners_per_proc = vec![vec![]; size]; - let mut vertex_local_indices_per_proc = vec![vec![]; size]; - let mut cell_owners_per_proc = vec![vec![]; size]; - let mut cell_local_indices_per_proc = vec![vec![]; size]; - - let mut cell_starts = vec![]; - let mut cell_ends = vec![]; - let mut cell_start = 0; - for index in 0..ncells { - cell_starts.push(cell_start); - cell_start += - self.elements_to_npoints[&(self.cell_types[index], self.cell_degrees[index])]; - cell_ends.push(cell_start); - } - - for (index, id) in self.cell_indices_to_ids.iter().enumerate() { - let owner = cell_owners[id]; - for v in &self.cells[cell_starts[index]..cell_ends[index]] { - if vertex_owners[*v].0 == -1 { - vertex_owners[*v] = (owner as i32, vertex_counts[owner]); - vertex_counts[owner] += 1; - } - if !vertex_indices_per_proc[owner].contains(v) { - vertex_indices_per_proc[owner].push(*v); - vertex_owners_per_proc[owner].push(vertex_owners[*v].0 as usize); - vertex_local_indices_per_proc[owner].push(vertex_owners[*v].1); - for i in 0..GDIM { - points_per_proc[owner].push(self.points[v * GDIM + i]) - } - point_ids_per_proc[owner].push(self.point_indices_to_ids[*v]) - } - } - } + fn new_extra_cell_info(&self) -> (Vec, Vec) { + (vec![], vec![]) + } - for index in 0..ncells { - for p in 0..size { - for v in &self.cells[cell_starts[index]..cell_ends[index]] { - if vertex_indices_per_proc[p].contains(v) { - cell_indices_per_proc[p].push(index); - break; - } - } - } + fn push_extra_cell_info(&self, 
extra_cell_info: &mut Self::ExtraCellInfo, cell_id: usize) { + extra_cell_info + .0 + .push(self.cell_types[self.cell_ids_to_indices[&cell_id]] as u8); + extra_cell_info + .1 + .push(self.cell_degrees[self.cell_ids_to_indices[&cell_id]]); + } + fn send_extra_cell_info<'a>( + &self, + scope: &LocalScope<'a>, + process: &Process, + extra_cell_info: &'a Self::ExtraCellInfo, + ) { + let _ = WaitGuard::from(process.immediate_send(scope, &extra_cell_info.0)); + let _ = WaitGuard::from(process.immediate_send(scope, &extra_cell_info.1)); + } + fn receive_extra_cell_info( + &self, + root_process: &Process, + extra_cell_info: &mut Self::ExtraCellInfo, + ) { + let (extra0, _status) = root_process.receive_vec::(); + for e in extra0 { + extra_cell_info.0.push(e); } - - for p in 0..size { - for index in &cell_indices_per_proc[p] { - let id = self.cell_indices_to_ids[*index]; - for v in &self.cells[cell_starts[*index]..cell_ends[*index]] { - if !vertex_indices_per_proc[p].contains(v) { - vertex_indices_per_proc[p].push(*v); - vertex_owners_per_proc[p].push(vertex_owners[*v].0 as usize); - vertex_local_indices_per_proc[p].push(vertex_owners[*v].1); - for i in 0..GDIM { - points_per_proc[p].push(self.points[v * GDIM + i]) - } - point_ids_per_proc[p].push(self.point_indices_to_ids[*v]) - } - cells_per_proc[p].push( - vertex_indices_per_proc[p] - .iter() - .position(|&r| r == *v) - .unwrap(), - ); - } - cell_types_per_proc[p].push(self.cell_types[*index] as u8); - cell_degrees_per_proc[p].push(self.cell_degrees[*index]); - cell_ids_per_proc[p].push(id); - cell_owners_per_proc[p].push(cell_owners[&id]); - cell_local_indices_per_proc[p].push( - cell_indices_per_proc[cell_owners[&id]] - .iter() - .position(|&r| r == *index) - .unwrap(), - ); - } + let (extra1, _status) = root_process.receive_vec::(); + for e in extra1 { + extra_cell_info.1.push(e); } + } - mpi::request::scope(|scope| { - for p in 1..size { - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &points_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &cells_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &cell_types_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &cell_degrees_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &point_ids_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &vertex_owners_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &vertex_local_indices_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &cell_ids_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &cell_owners_per_proc[p]), - ); - let _ = WaitGuard::from( - comm.process_at_rank(p as i32) - .immediate_send(scope, &cell_local_indices_per_proc[p]), - ); - } - }); - let cell_types = cell_types_per_proc[rank] + fn point_indices_to_ids(&self) -> &[usize] { + &self.point_indices_to_ids + } + fn points(&self) -> &[T] { + &self.points + } + fn cell_indices_to_ids(&self) -> &[usize] { + &self.cell_indices_to_ids + } + fn cell_points(&self, index: usize) -> &[usize] { + let cell_start = self + .cell_types .iter() - .map(|i| ReferenceCellType::from(*i).unwrap()) - .collect::>(); - self.create_internal( - comm, - 
&points_per_proc[rank], - &cells_per_proc[rank], - &cell_types, - &cell_degrees_per_proc[rank], - &point_ids_per_proc[rank], - &vertex_owners_per_proc[rank], - &vertex_local_indices_per_proc[rank], - &cell_ids_per_proc[rank], - &cell_owners_per_proc[rank], - &cell_local_indices_per_proc[rank], - ) + .zip(&self.cell_degrees) + .take(index) + .map(|(i, j)| self.elements_to_npoints[&(*i, *j)]) + .sum::(); + let cell_npts = + self.elements_to_npoints[&(self.cell_types[index], self.cell_degrees[index])]; + &self.cells[cell_start..cell_start + cell_npts] } - fn receive_parallel_grid( - self, - comm: &C, - root_rank: usize, - ) -> ParallelGrid<'_, C, MixedGrid> { - let root_process = comm.process_at_rank(root_rank as i32); - - let (points, _status) = root_process.receive_vec::(); - let (cells, _status) = root_process.receive_vec::(); - let (cell_types_u8, _status) = root_process.receive_vec::(); - let (cell_degrees, _status) = root_process.receive_vec::(); - let (point_ids, _status) = root_process.receive_vec::(); - let (vertex_owners, _status) = root_process.receive_vec::(); - let (vertex_local_indices, _status) = root_process.receive_vec::(); - let (cell_ids, _status) = root_process.receive_vec::(); - let (cell_owners, _status) = root_process.receive_vec::(); - let (cell_local_indices, _status) = root_process.receive_vec::(); - let cell_types = cell_types_u8 + fn cell_vertices(&self, index: usize) -> &[usize] { + let cell_start = self + .cell_types .iter() - .map(|i| ReferenceCellType::from(*i).unwrap()) - .collect::>(); - self.create_internal( - comm, - &points, - &cells, - &cell_types, - &cell_degrees, - &point_ids, - &vertex_owners, - &vertex_local_indices, - &cell_ids, - &cell_owners, - &cell_local_indices, - ) + .zip(&self.cell_degrees) + .take(index) + .map(|(i, j)| self.elements_to_npoints[&(*i, *j)]) + .sum::(); + let cell_nvertices = reference_cell::entity_counts(self.cell_types[index])[0]; + &self.cells[cell_start..cell_start + cell_nvertices] } -} - -impl> MixedGridBuilder -where - for<'a> Array, 2>, 2>, 2>: MatrixInverse, -{ - #[allow(clippy::too_many_arguments)] - fn create_internal<'a, C: Communicator>( - self, - comm: &'a C, - points: &[T], + fn cell_type(&self, index: usize) -> ReferenceCellType { + self.cell_types[index] + } + fn create_serial_grid( + &self, + points: Array, 2>, 2>, cells: &[usize], - cell_types: &[ReferenceCellType], - cell_degrees: &[usize], - point_ids: &[usize], - vertex_owners: &[usize], - vertex_local_indices: &[usize], - cell_ids: &[usize], - cell_owners: &[usize], - cell_local_indices: &[usize], - ) -> ParallelGrid<'a, C, MixedGrid> { - let rank = comm.rank() as usize; - let npts = point_ids.len(); - let ncells = cell_ids.len(); - - let mut coordinates = rlst_dynamic_array2!(T, [npts, 3]); - for i in 0..npts { - for j in 0..3 { - *coordinates.get_mut([i, j]).unwrap() = points[i * 3 + j]; - } - } - - let mut point_ids_to_indices = HashMap::new(); - for (index, id) in point_ids.iter().enumerate() { - point_ids_to_indices.insert(*id, index); - } - let mut cell_ids_to_indices = HashMap::new(); - for (index, id) in cell_ids.iter().enumerate() { - cell_ids_to_indices.insert(*id, index); - } - - let mut cell_ownership = vec![]; - for index in 0..ncells { - cell_ownership.push(if cell_owners[index] == rank { - Ownership::Owned - } else { - Ownership::Ghost(cell_owners[index], cell_local_indices[index]) - }); - } - let mut vertex_ownership = vec![]; - for index in 0..npts { - vertex_ownership.push(if vertex_owners[index] == rank { - Ownership::Owned - } else { - 
Ownership::Ghost(vertex_owners[index], vertex_local_indices[index]) - }); - } + point_indices_to_ids: Vec, + point_ids_to_indices: HashMap, + cell_indices_to_ids: Vec, + _cell_ids_to_indices: HashMap, + edge_ids: HashMap<[usize; 2], usize>, + extra_cell_info: &(Vec, Vec), + ) -> Self::G { + let cell_types = extra_cell_info + .0 + .iter() + .map(|e| ReferenceCellType::from(*e).unwrap()) + .collect::>(); - let serial_grid = MixedGrid::new( - coordinates, + MixedGrid::new( + points, cells, - cell_types, - cell_degrees, - point_ids.to_vec(), + &cell_types, + &extra_cell_info.1, + point_indices_to_ids, point_ids_to_indices, - cell_ids.to_vec(), - Some(cell_ownership), - Some(vertex_ownership), - ); - - ParallelGrid::new(comm, serial_grid) + cell_indices_to_ids, + Some(edge_ids), + ) } } diff --git a/src/grid/mixed_grid/topology.rs b/src/grid/mixed_grid/topology.rs index 63088ab8..60e6df30 100644 --- a/src/grid/mixed_grid/topology.rs +++ b/src/grid/mixed_grid/topology.rs @@ -35,8 +35,10 @@ pub struct MixedTopology { entities_to_cells: Vec>>>, entities_to_flat_cells: Vec>>>, entity_types: Vec>, - vertex_indices_to_ids: HashMap, + vertex_indices_to_ids: Vec, vertex_ids_to_indices: HashMap, + edge_indices_to_ids: Vec, + edge_ids_to_indices: HashMap, cell_indices_to_ids: HashMap, cell_ids_to_indices: HashMap, } @@ -50,14 +52,17 @@ impl MixedTopology { cell_types: &[ReferenceCellType], point_indices_to_ids: &[usize], grid_cell_indices_to_ids: &[usize], + edge_ids: Option>, ) -> Self { let mut index_map = vec![(ReferenceCellType::Point, 0); cell_types.len()]; let mut reverse_index_map = HashMap::new(); let mut vertices = vec![]; let dim = reference_cell::dim(cell_types[0]); - let mut vertex_indices_to_ids = HashMap::new(); + let mut vertex_indices_to_ids = vec![]; let mut vertex_ids_to_indices = HashMap::new(); + let mut edge_indices_to_ids = vec![]; + let mut edge_ids_to_indices = HashMap::new(); let mut cell_indices_to_ids = HashMap::new(); let mut cell_ids_to_indices = HashMap::new(); @@ -106,9 +111,9 @@ impl MixedTopology { if !vertices.contains(v) { entities_to_cells[0].push(vec![]); entities_to_flat_cells[0].push(vec![]); + vertex_indices_to_ids.push(point_indices_to_ids[*v]); + vertex_ids_to_indices.insert(point_indices_to_ids[*v], vertices.len()); vertices.push(*v); - vertex_indices_to_ids.insert(*v, point_indices_to_ids[*v]); - vertex_ids_to_indices.insert(point_indices_to_ids[*v], *v); } row.push(vertices.iter().position(|&r| r == *v).unwrap()); } @@ -130,7 +135,60 @@ impl MixedTopology { entities_to_vertices[0].push(vec![i]); } - for d in 1..dim { + let mut edge_indices = HashMap::new(); + if let Some(e) = &edge_ids { + for (edge_i, (i, j)) in e.iter().enumerate() { + let mut v0 = vertex_ids_to_indices[&i[0]]; + let mut v1 = vertex_ids_to_indices[&i[1]]; + if v0 > v1 { + std::mem::swap(&mut v0, &mut v1); + } + edge_indices.insert((v0, v1), edge_i); + edge_indices_to_ids.push(*j); + edge_ids_to_indices.insert(*j, edge_i); + entities_to_vertices[1].push(vec![v0, v1]); + entities_to_cells[1].push(vec![]); + } + } + for cell_type in &entity_types[dim] { + let ref_conn = &reference_cell::connectivity(*cell_type)[1]; + let ncells = cells_to_entities[0][cell_type].len(); + for cell_i in 0..ncells { + cells_to_entities[1] + .get_mut(cell_type) + .unwrap() + .push(vec![]); + let cell_index = (*cell_type, cell_i); + for (local_index, rc) in ref_conn.iter().enumerate() { + let cell = &cells_to_entities[0][cell_type][cell_i]; + let mut first = cell[rc[0][0]]; + let mut second = cell[rc[0][1]]; + if 
first > second { + std::mem::swap(&mut first, &mut second); + } + if let Some(edge_index) = edge_indices.get(&(first, second)) { + cells_to_entities[1].get_mut(cell_type).unwrap()[cell_i].push(*edge_index); + entities_to_cells[1][*edge_index] + .push(CellLocalIndexPair::new(cell_index, local_index)); + } else { + if edge_ids.is_some() { + panic!("Missing id for edge"); + } + let id = entities_to_vertices[1].len(); + edge_indices.insert((first, second), id); + edge_indices_to_ids.push(id); + edge_ids_to_indices.insert(id, id); + cells_to_entities[1].get_mut(cell_type).unwrap()[cell_i] + .push(entities_to_vertices[1].len()); + entities_to_cells[1] + .push(vec![CellLocalIndexPair::new(cell_index, local_index)]); + entities_to_vertices[1].push(vec![first, second]); + } + } + } + } + + for d in 2..dim { for cell_type in &entity_types[dim] { let mut c_to_e = vec![]; let mut c_to_e_flat = vec![]; @@ -183,6 +241,8 @@ impl MixedTopology { entity_types, vertex_indices_to_ids, vertex_ids_to_indices, + edge_indices_to_ids, + edge_ids_to_indices, cell_indices_to_ids, cell_ids_to_indices, } @@ -293,7 +353,7 @@ impl Topology for MixedTopology { } fn vertex_index_to_id(&self, index: usize) -> usize { - self.vertex_indices_to_ids[&index] + self.vertex_indices_to_ids[index] } fn cell_index_to_id(&self, index: IndexType) -> usize { self.cell_indices_to_ids[&index] @@ -305,6 +365,12 @@ impl Topology for MixedTopology { panic!("Vertex with id {} not found", id); } } + fn edge_id_to_index(&self, id: usize) -> usize { + self.edge_ids_to_indices[&id] + } + fn edge_index_to_id(&self, index: usize) -> usize { + self.edge_indices_to_ids[index] + } fn cell_id_to_index(&self, id: usize) -> IndexType { self.cell_ids_to_indices[&id] } @@ -330,6 +396,7 @@ mod test { &[ReferenceCellType::Triangle; 2], &[0, 1, 2, 3], &[0, 1], + None, ) } @@ -343,6 +410,7 @@ mod test { ], &[0, 1, 2, 3, 4], &[0, 1], + None, ) } diff --git a/src/grid/parallel_grid.rs b/src/grid/parallel_grid.rs index efc9aa69..5d2a093d 100644 --- a/src/grid/parallel_grid.rs +++ b/src/grid/parallel_grid.rs @@ -1,9 +1,18 @@ //! 
A parallel implementation of a grid +use crate::element::reference_cell; use crate::grid::traits::{Grid, Topology}; +use crate::traits::grid::{Builder, GridType, ParallelBuilder}; use crate::traits::types::{CellLocalIndexPair, Ownership, ReferenceCellType}; -use mpi::topology::Communicator; +use mpi::{ + request::{LocalScope, WaitGuard}, + topology::{Communicator, Process}, + traits::{Buffer, Destination, Equivalence, Source}, +}; +use rlst::{rlst_dynamic_array2, Array, BaseArray, RandomAccessMut, VectorContainer}; use std::collections::HashMap; +type RlstMat = Array, 2>, 2>; + /// Grid local to a process pub struct LocalGrid { rank: usize, @@ -69,12 +78,18 @@ impl Topology for LocalGrid { fn vertex_index_to_id(&self, index: usize) -> usize { self.serial_grid.topology().vertex_index_to_id(index) } + fn edge_index_to_id(&self, index: usize) -> usize { + self.serial_grid.topology().edge_index_to_id(index) + } fn cell_index_to_id(&self, index: Self::IndexType) -> usize { self.serial_grid.topology().cell_index_to_id(index) } fn vertex_id_to_index(&self, id: usize) -> usize { self.serial_grid.topology().vertex_id_to_index(id) } + fn edge_id_to_index(&self, id: usize) -> usize { + self.serial_grid.topology().edge_id_to_index(id) + } fn cell_id_to_index(&self, id: usize) -> Self::IndexType { self.serial_grid.topology().cell_id_to_index(id) } @@ -122,10 +137,177 @@ impl<'comm, C: Communicator, G: Grid> ParallelGrid<'comm, C, G> { pub fn new( comm: &'comm C, serial_grid: G, - vertex_ownership: HashMap, - edge_ownership: HashMap, - cell_ownership: HashMap<<::Topology as Topology>::IndexType, Ownership>, + vertex_owners: HashMap, + edge_owners: HashMap, + cell_owners: HashMap, ) -> Self { + let rank = comm.rank() as usize; + let size = comm.size() as usize; + + // Create cell ownership + let mut cell_ownership = HashMap::new(); + let mut cells_to_query = vec![vec![]; size]; + + for (id, owner) in cell_owners.iter() { + if *owner != rank { + cells_to_query[*owner].push(*id); + } + } + mpi::request::scope(|scope| { + for (p, cq) in cells_to_query.iter().enumerate() { + if p != rank { + let _ = + WaitGuard::from(comm.process_at_rank(p as i32).immediate_send(scope, cq)); + } + } + }); + for p in 0..size { + if p != rank { + let (cells_queried, _status) = + comm.process_at_rank(p as i32).receive_vec::(); + let send_back = + cells_queried + .iter() + .map(|id| { + serial_grid.topology().face_index_to_flat_index( + Topology::cell_id_to_index(serial_grid.topology(), *id), + ) + }) + .collect::>(); + mpi::request::scope(|scope| { + let _ = WaitGuard::from( + comm.process_at_rank(p as i32) + .immediate_send(scope, &send_back), + ); + }); + } + } + let mut cell_info = vec![vec![]; size]; + for (p, ci) in cell_info.iter_mut().enumerate() { + if p != rank { + (*ci, _) = comm.process_at_rank(p as i32).receive_vec::(); + } + } + + let mut indices = vec![0; size]; + for (id, owner) in cell_owners.iter() { + cell_ownership.insert( + Topology::cell_id_to_index(serial_grid.topology(), *id), + if *owner == rank { + Ownership::Owned + } else { + indices[*owner] += 1; + Ownership::Ghost(*owner, cell_info[*owner][indices[*owner] - 1]) + }, + ); + } + + // Create vertex ownership + let mut vertex_ownership = HashMap::new(); + let mut vertices_to_query = vec![vec![]; size]; + + for (id, owner) in vertex_owners.iter() { + if *owner != rank { + vertices_to_query[*owner].push(*id); + } + } + mpi::request::scope(|scope| { + for (p, vq) in vertices_to_query.iter().enumerate() { + if p != rank { + let _ = + 
WaitGuard::from(comm.process_at_rank(p as i32).immediate_send(scope, vq)); + } + } + }); + for p in 0..size { + if p != rank { + let (vertices_queried, _status) = + comm.process_at_rank(p as i32).receive_vec::(); + let send_back = vertices_queried + .iter() + .map(|id| Topology::vertex_id_to_index(serial_grid.topology(), *id)) + .collect::>(); + mpi::request::scope(|scope| { + let _ = WaitGuard::from( + comm.process_at_rank(p as i32) + .immediate_send(scope, &send_back), + ); + }); + } + } + let mut vertex_info = vec![vec![]; size]; + for (p, vi) in vertex_info.iter_mut().enumerate() { + if p != rank { + (*vi, _) = comm.process_at_rank(p as i32).receive_vec::(); + } + } + + let mut indices = vec![0; size]; + for (id, owner) in vertex_owners.iter() { + vertex_ownership.insert( + Topology::vertex_id_to_index(serial_grid.topology(), *id), + if *owner == rank { + Ownership::Owned + } else { + indices[*owner] += 1; + Ownership::Ghost(*owner, vertex_info[*owner][indices[*owner] - 1]) + }, + ); + } + + // Create edge ownership + let mut edge_ownership = HashMap::new(); + let mut edges_to_query = vec![vec![]; size]; + + for (id, owner) in edge_owners.iter() { + if *owner != rank { + edges_to_query[*owner].push(*id); + } + } + mpi::request::scope(|scope| { + for (p, eq) in edges_to_query.iter().enumerate() { + if p != rank { + let _ = + WaitGuard::from(comm.process_at_rank(p as i32).immediate_send(scope, eq)); + } + } + }); + for p in 0..size { + if p != rank { + let (edges_queried, _status) = + comm.process_at_rank(p as i32).receive_vec::(); + let send_back = edges_queried + .iter() + .map(|id| Topology::edge_id_to_index(serial_grid.topology(), *id)) + .collect::>(); + mpi::request::scope(|scope| { + let _ = WaitGuard::from( + comm.process_at_rank(p as i32) + .immediate_send(scope, &send_back), + ); + }); + } + } + let mut edge_info = vec![vec![]; size]; + for (p, ei) in edge_info.iter_mut().enumerate() { + if p != rank { + (*ei, _) = comm.process_at_rank(p as i32).receive_vec::(); + } + } + + let mut indices = vec![0; size]; + for (id, owner) in edge_owners.iter() { + edge_ownership.insert( + Topology::edge_id_to_index(serial_grid.topology(), *id), + if *owner == rank { + Ownership::Owned + } else { + indices[*owner] += 1; + Ownership::Ghost(*owner, edge_info[*owner][indices[*owner] - 1]) + }, + ); + } + let local_grid = LocalGrid { rank: comm.rank() as usize, serial_grid, @@ -193,12 +375,18 @@ impl<'comm, C: Communicator, G: Grid> Topology for ParallelGrid<'comm, C, G> { fn vertex_index_to_id(&self, index: usize) -> usize { self.local_grid.topology().vertex_index_to_id(index) } + fn edge_index_to_id(&self, index: usize) -> usize { + self.local_grid.topology().edge_index_to_id(index) + } fn cell_index_to_id(&self, index: Self::IndexType) -> usize { self.local_grid.topology().cell_index_to_id(index) } fn vertex_id_to_index(&self, id: usize) -> usize { self.local_grid.topology().vertex_id_to_index(id) } + fn edge_id_to_index(&self, id: usize) -> usize { + self.local_grid.topology().edge_id_to_index(id) + } fn cell_id_to_index(&self, id: usize) -> Self::IndexType { self.local_grid.topology().cell_id_to_index(id) } @@ -209,7 +397,7 @@ impl<'comm, C: Communicator, G: Grid> Topology for ParallelGrid<'comm, C, G> { self.local_grid.topology().face_flat_index_to_index(index) } fn cell_types(&self) -> &[ReferenceCellType] { - self.local_grid.topology().cell_types() + Topology::cell_types(self.local_grid.topology()) } } @@ -234,3 +422,347 @@ impl<'a, C: Communicator, G: Grid> Grid for ParallelGrid<'a, C, G> 
{ false } } + +/// Internal trait for building parallel grids +pub(crate) trait ParallelGridBuilder { + /// The serial grid type used on each process + type G: Grid; + + /// Extra cell info + type ExtraCellInfo: Clone + std::fmt::Debug; + + /// Push information from a cell to extra cell info + fn push_extra_cell_info(&self, _extra_cell_info: &mut Self::ExtraCellInfo, _cell_id: usize) {} + /// Send extra cell info from root process to another process + fn send_extra_cell_info<'a>( + &self, + _scope: &LocalScope<'a>, + _process: &Process, + _extra_cell_info: &'a Self::ExtraCellInfo, + ) { + } + /// Receive extra cell info from root process + fn receive_extra_cell_info( + &self, + _root_process: &Process, + _extra_cell_info: &mut Self::ExtraCellInfo, + ) { + } + /// Create new empty extra cell info + fn new_extra_cell_info(&self) -> Self::ExtraCellInfo; + + /// The id of each point + fn point_indices_to_ids(&self) -> &[usize]; + + /// The coordinates of each point + fn points(&self) -> &[<::G as GridType>::T]; + + /// The id of each cell + fn cell_indices_to_ids(&self) -> &[usize]; + + /// The point of a cell + fn cell_points(&self, index: usize) -> &[usize]; + + /// The vertices of a cell + fn cell_vertices(&self, index: usize) -> &[usize]; + + /// The cell type of a cell + fn cell_type(&self, index: usize) -> ReferenceCellType; + + #[allow(clippy::too_many_arguments)] + /// Create a serial grid on one process + fn create_serial_grid( + &self, + points: RlstMat<<::G as GridType>::T>, + cells: &[usize], + point_indices_to_ids: Vec, + point_ids_to_indices: HashMap, + cell_indices_to_ids: Vec, + cell_ids_to_indices: HashMap, + edge_ids: HashMap<[usize; 2], usize>, + extra_cell_info: &Self::ExtraCellInfo, + ) -> Self::G; + + #[allow(clippy::too_many_arguments)] + /// Internal function to create a parallel grid + fn create_internal<'a, C: Communicator>( + &self, + comm: &'a C, + points: &[<::G as GridType>::T], + point_ids: &[usize], + cells: &[usize], + cell_owners: &[usize], + cell_ids: &[usize], + vertex_owners: &[usize], + vertex_ids: &[usize], + edges: &[usize], + edge_owners: &[usize], + edge_ids: &[usize], + extra_cell_info: &Self::ExtraCellInfo, + ) -> ParallelGrid<'a, C, Self::G> { + let npts = point_ids.len(); + + let mut coordinates = + rlst_dynamic_array2!(<::G as GridType>::T, [npts, 3]); + for i in 0..npts { + for j in 0..3 { + *coordinates.get_mut([i, j]).unwrap() = points[i * 3 + j]; + } + } + + let mut point_ids_to_indices = HashMap::new(); + for (index, id) in point_ids.iter().enumerate() { + point_ids_to_indices.insert(*id, index); + } + let mut cell_ids_to_indices = HashMap::new(); + for (index, id) in cell_ids.iter().enumerate() { + cell_ids_to_indices.insert(*id, index); + } + + let mut edge_id_map = HashMap::new(); + for (n, id) in edge_ids.iter().enumerate() { + edge_id_map.insert([edges[2 * n], edges[2 * n + 1]], *id); + } + + let serial_grid = self.create_serial_grid( + coordinates, + cells, + point_ids.to_vec(), + point_ids_to_indices, + cell_ids.to_vec(), + cell_ids_to_indices, + edge_id_map, + extra_cell_info, + ); + + let mut vertex_owner_map = HashMap::new(); + for (id, owner) in vertex_ids.iter().zip(vertex_owners) { + vertex_owner_map.insert(*id, *owner); + } + let mut edge_owner_map = HashMap::new(); + for (id, owner) in edge_ids.iter().zip(edge_owners) { + edge_owner_map.insert(*id, *owner); + } + let mut cell_owner_map = HashMap::new(); + for (id, owner) in cell_ids.iter().zip(cell_owners) { + cell_owner_map.insert(*id, *owner); + } + + ParallelGrid::new( + 
+            comm,
+            serial_grid,
+            vertex_owner_map,
+            edge_owner_map,
+            cell_owner_map,
+        )
+    }
+}
+
+impl + Builder>
+    ParallelBuilder for B
+where
+    Vec<::T>: Buffer,
+    ::T: Equivalence,
+{
+    type ParallelGridType<'a, C: Communicator + 'a> = ParallelGrid<'a, C, G>;
+
+    fn create_parallel_grid<'a, C: Communicator>(
+        self,
+        comm: &'a C,
+        cell_owners: &HashMap,
+    ) -> Self::ParallelGridType<'a, C> {
+        let rank = comm.rank() as usize;
+        let size = comm.size() as usize;
+
+        let npts = self.point_indices_to_ids().len();
+        let ncells = self.cell_indices_to_ids().len();
+
+        // data used in computation
+        let mut vertex_owners = vec![(-1, 0); npts];
+        let mut vertex_counts = vec![0; size];
+        let mut cell_indices_per_proc = vec![vec![]; size];
+        let mut vertex_indices_per_proc = vec![vec![]; size];
+        let mut edge_owners = HashMap::new();
+        let mut edge_ids = HashMap::new();
+        let mut edge_counts = vec![0; size];
+        let mut edges_included_per_proc = vec![vec![]; size];
+        let mut edge_id = 0;
+
+        // data to send to other processes
+        let mut points_per_proc = vec![vec![]; size];
+        let mut point_ids_per_proc = vec![vec![]; size];
+        let mut cells_per_proc = vec![vec![]; size];
+        let mut cell_owners_per_proc = vec![vec![]; size];
+        let mut cell_ids_per_proc = vec![vec![]; size];
+        let mut vertex_owners_per_proc = vec![vec![]; size];
+        let mut edges_per_proc = vec![vec![]; size];
+        let mut edge_owners_per_proc = vec![vec![]; size];
+        let mut edge_ids_per_proc = vec![vec![]; size];
+        let mut extra_cell_info_per_proc = vec![self.new_extra_cell_info(); size];
+
+        for (index, id) in self.cell_indices_to_ids().iter().enumerate() {
+            let owner = cell_owners[id];
+            // TODO: only assign owners to the first 3 or 4 vertices
+            for v in self.cell_points(index) {
+                if vertex_owners[*v].0 == -1 {
+                    vertex_owners[*v] = (owner as i32, vertex_counts[owner]);
+                }
+                if !vertex_indices_per_proc[owner].contains(v) {
+                    vertex_indices_per_proc[owner].push(*v);
+                    vertex_owners_per_proc[owner].push(vertex_owners[*v].0 as usize);
+                    for i in 0..GDIM {
+                        points_per_proc[owner].push(self.points()[v * GDIM + i])
+                    }
+                    point_ids_per_proc[owner].push(self.point_indices_to_ids()[*v]);
+                    vertex_counts[owner] += 1;
+                }
+            }
+        }
+
+        for (index, id) in self.cell_indices_to_ids().iter().enumerate() {
+            let ref_conn = &reference_cell::connectivity(self.cell_type(index))[1];
+            let owner = cell_owners[id];
+            for e in ref_conn {
+                let cell = self.cell_vertices(index);
+                let mut v0 = cell[e[0][0]];
+                let mut v1 = cell[e[0][1]];
+                if v0 > v1 {
+                    std::mem::swap(&mut v0, &mut v1);
+                }
+                if edge_owners.get_mut(&(v0, v1)).is_none() {
+                    edge_owners.insert((v0, v1), (owner, edge_counts[owner]));
+                    edge_ids.insert((v0, v1), edge_id);
+                    edge_id += 1;
+                    edges_included_per_proc[owner].push((v0, v1));
+                    edges_per_proc[owner].push(v0);
+                    edges_per_proc[owner].push(v1);
+                    edge_owners_per_proc[owner].push(edge_owners[&(v0, v1)].0);
+                    edge_ids_per_proc[owner].push(edge_ids[&(v0, v1)]);
+                    edge_counts[owner] += 1;
+                }
+            }
+        }
+
+        for index in 0..ncells {
+            for p in 0..size {
+                for v in self.cell_points(index) {
+                    if vertex_indices_per_proc[p].contains(v) {
+                        cell_indices_per_proc[p].push(index);
+                        break;
+                    }
+                }
+            }
+        }
+
+        for p in 0..size {
+            for index in &cell_indices_per_proc[p] {
+                let id = self.cell_indices_to_ids()[*index];
+                // TODO: only assign owners to the first 3 or 4 vertices
+                for v in self.cell_points(*index) {
+                    if !vertex_indices_per_proc[p].contains(v) {
+                        vertex_indices_per_proc[p].push(*v);
+                        vertex_owners_per_proc[p].push(vertex_owners[*v].0 as usize);
+                        for i in 0..GDIM {
+                            points_per_proc[p].push(self.points()[v * GDIM + i]);
+                        }
+                        point_ids_per_proc[p].push(self.point_indices_to_ids()[*v]);
+                    }
+                    cells_per_proc[p].push(
+                        vertex_indices_per_proc[p]
+                            .iter()
+                            .position(|&r| r == *v)
+                            .unwrap(),
+                    );
+                }
+                let ref_conn = &reference_cell::connectivity(self.cell_type(*index))[1];
+
+                for e in ref_conn {
+                    let cell = self.cell_vertices(*index);
+                    let mut v0 = cell[e[0][0]];
+                    let mut v1 = cell[e[0][1]];
+                    if v0 > v1 {
+                        std::mem::swap(&mut v0, &mut v1);
+                    }
+                    if !edges_included_per_proc[p].contains(&(v0, v1)) {
+                        edges_included_per_proc[p].push((v0, v1));
+                        edges_per_proc[p].push(v0);
+                        edges_per_proc[p].push(v1);
+                        edge_owners_per_proc[p].push(edge_owners[&(v0, v1)].0);
+                        edge_ids_per_proc[p].push(edge_ids[&(v0, v1)]);
+                    }
+                }
+
+                cell_ids_per_proc[p].push(id);
+                cell_owners_per_proc[p].push(cell_owners[&id]);
+                self.push_extra_cell_info(&mut extra_cell_info_per_proc[p], id);
+            }
+        }
+
+        mpi::request::scope(|scope| {
+            for p in 1..size {
+                let process = comm.process_at_rank(p as i32);
+                let _ = WaitGuard::from(process.immediate_send(scope, &points_per_proc[p]));
+                let _ = WaitGuard::from(process.immediate_send(scope, &point_ids_per_proc[p]));
+                let _ = WaitGuard::from(process.immediate_send(scope, &cells_per_proc[p]));
+                let _ = WaitGuard::from(process.immediate_send(scope, &cell_owners_per_proc[p]));
+                let _ = WaitGuard::from(process.immediate_send(scope, &cell_ids_per_proc[p]));
+                let _ = WaitGuard::from(process.immediate_send(scope, &vertex_owners_per_proc[p]));
+                let _ = WaitGuard::from(process.immediate_send(scope, &edges_per_proc[p]));
+                let _ = WaitGuard::from(process.immediate_send(scope, &edge_owners_per_proc[p]));
+                let _ = WaitGuard::from(process.immediate_send(scope, &edge_ids_per_proc[p]));
+                self.send_extra_cell_info(scope, &process, &extra_cell_info_per_proc[p]);
+            }
+        });
+
+        self.create_internal(
+            comm,
+            &points_per_proc[rank],
+            &point_ids_per_proc[rank],
+            &cells_per_proc[rank],
+            &cell_owners_per_proc[rank],
+            &cell_ids_per_proc[rank],
+            &vertex_owners_per_proc[rank],
+            &point_ids_per_proc[rank],
+            &edges_per_proc[rank],
+            &edge_owners_per_proc[rank],
+            &edge_ids_per_proc[rank],
+            &extra_cell_info_per_proc[rank],
+        )
+    }
+
+    fn receive_parallel_grid(
+        self,
+        comm: &C,
+        root_rank: usize,
+    ) -> ParallelGrid<'_, C, G> {
+        let root_process = comm.process_at_rank(root_rank as i32);
+
+        let (points, _status) =
+            root_process.receive_vec::<<::G as GridType>::T>();
+        let (point_ids, _status) = root_process.receive_vec::();
+        let (cells, _status) = root_process.receive_vec::();
+        let (cell_owners, _status) = root_process.receive_vec::();
+        let (cell_ids, _status) = root_process.receive_vec::();
+        let (vertex_owners, _status) = root_process.receive_vec::();
+        let (edges, _status) = root_process.receive_vec::();
+        let (edge_owners, _status) = root_process.receive_vec::();
+        let (edge_ids, _status) = root_process.receive_vec::();
+        let mut extra_cell_info = self.new_extra_cell_info();
+        self.receive_extra_cell_info(&root_process, &mut extra_cell_info);
+
+        self.create_internal(
+            comm,
+            &points,
+            &point_ids,
+            &cells,
+            &cell_owners,
+            &cell_ids,
+            &vertex_owners,
+            &point_ids,
+            &edges,
+            &edge_owners,
+            &edge_ids,
+            &extra_cell_info,
+        )
+    }
+}
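A note on the ownership convention used in the parallel grid code above (an illustrative aside, not part of the patch): every vertex, edge and cell is `Owned` on exactly one rank and recorded as `Ghost(owner_rank, index_on_owner)` everywhere else, and the query/reply exchanges exist only to learn the owner's local index for each ghost entity. A minimal standalone sketch of that bookkeeping, with all names invented for the example:

```rust
use std::collections::HashMap;

/// Ownership of a mesh entity on the current rank (mirrors the enum used above).
#[derive(Debug, PartialEq)]
enum Ownership {
    /// The entity is owned by this rank.
    Owned,
    /// The entity is a ghost: (owning rank, index of the entity on that rank).
    Ghost(usize, usize),
}

/// Build an id -> ownership map from an id -> owner map and, for ghosts,
/// an id -> remote-index map obtained from the owning ranks.
fn assign_ownership(
    my_rank: usize,
    owners: &HashMap<usize, usize>,
    remote_indices: &HashMap<usize, usize>,
) -> HashMap<usize, Ownership> {
    owners
        .iter()
        .map(|(id, owner)| {
            let ownership = if *owner == my_rank {
                Ownership::Owned
            } else {
                Ownership::Ghost(*owner, remote_indices[id])
            };
            (*id, ownership)
        })
        .collect()
}

fn main() {
    // Entity 7 is owned locally on rank 0; entity 9 lives on rank 1 at index 4.
    let owners = HashMap::from([(7, 0), (9, 1)]);
    let remote_indices = HashMap::from([(9, 4)]);
    let ownership = assign_ownership(0, &owners, &remote_indices);
    assert_eq!(ownership[&7], Ownership::Owned);
    assert_eq!(ownership[&9], Ownership::Ghost(1, 4));
}
```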
diff --git a/src/grid/single_element_grid.rs b/src/grid/single_element_grid.rs
index 0960737d..1a0002be 100644
--- a/src/grid/single_element_grid.rs
+++ b/src/grid/single_element_grid.rs
@@ -6,8 +6,8 @@ mod builder;
 mod geometry;
 mod grid;
 mod io;
-//#[cfg(feature = "mpi")]
-//mod parallel;
+#[cfg(feature = "mpi")]
+mod parallel;
 mod topology;
 
 pub use self::builder::SingleElementGridBuilder;
diff --git a/src/grid/single_element_grid/builder.rs b/src/grid/single_element_grid/builder.rs
index 751fc181..d1dae937 100644
--- a/src/grid/single_element_grid/builder.rs
+++ b/src/grid/single_element_grid/builder.rs
@@ -1,6 +1,6 @@
 //! Grid builder
 
-use crate::element::ciarlet::lagrange;
+use crate::element::{ciarlet::lagrange, reference_cell};
 use crate::grid::single_element_grid::grid::SingleElementGrid;
 use crate::traits::element::{Continuity, FiniteElement};
 use crate::traits::grid::Builder;
@@ -17,6 +17,9 @@ use std::collections::HashMap;
 pub struct SingleElementGridBuilder> {
     pub(crate) element_data: (ReferenceCellType, usize),
     pub(crate) points_per_cell: usize,
+    // Dead code allowed here, as vertices_per_cell is only used if the mpi feature is activated
+    #[allow(dead_code)]
+    pub(crate) vertices_per_cell: usize,
     pub(crate) points: Vec,
     pub(crate) cells: Vec,
     pub(crate) point_indices_to_ids: Vec,
@@ -37,9 +40,12 @@ where
     fn new(data: (ReferenceCellType, usize)) -> Self {
         let points_per_cell =
             lagrange::create::(data.0, data.1, Continuity::Continuous).dim();
+        let vertices_per_cell = reference_cell::entity_counts(data.0)[0];
+
         Self {
             element_data: data,
             points_per_cell,
+            vertices_per_cell,
             points: vec![],
             cells: vec![],
             point_indices_to_ids: vec![],
@@ -51,9 +57,11 @@ where
     fn new_with_capacity(npoints: usize, ncells: usize, data: (ReferenceCellType, usize)) -> Self {
         let points_per_cell =
             lagrange::create::(data.0, data.1, Continuity::Continuous).dim();
+        let vertices_per_cell = reference_cell::entity_counts(data.0)[0];
         Self {
             element_data: data,
             points_per_cell,
+            vertices_per_cell,
             points: Vec::with_capacity(npoints * Self::GDIM),
             cells: Vec::with_capacity(ncells * points_per_cell),
             point_indices_to_ids: Vec::with_capacity(npoints),
@@ -100,6 +108,7 @@
             self.point_ids_to_indices,
             self.cell_indices_to_ids,
             self.cell_ids_to_indices,
+            None,
         )
     }
 }
diff --git a/src/grid/single_element_grid/grid.rs b/src/grid/single_element_grid/grid.rs
index 34f1a540..fca975dd 100644
--- a/src/grid/single_element_grid/grid.rs
+++ b/src/grid/single_element_grid/grid.rs
@@ -38,6 +38,7 @@
         point_ids_to_indices: HashMap,
         cell_indices_to_ids: Vec,
         cell_ids_to_indices: HashMap,
+        edge_ids: Option>,
     ) -> Self {
         if cell_type == ReferenceCellType::Triangle && cell_degree == 1 {
             warn!("Creating a single element grid with a P1 triangle. Using a FlatTriangleGrid would be faster.");
         }
@@ -59,6 +60,7 @@
             cell_type,
             &point_indices_to_ids,
             &cell_indices_to_ids,
+            edge_ids,
         );
         let geometry = SingleElementGeometry::::new(
             points,
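For context on the new `vertices_per_cell` field (a hedged aside, not part of the patch): a higher-order cell stores more geometry points than it has topological vertices, so the builder tracks both counts; for a degree-p Lagrange triangle there are (p + 1)(p + 2)/2 points but always 3 vertices. A small sketch of that arithmetic:

```rust
/// Number of Lagrange points on a degree-`p` triangle: (p + 1)(p + 2) / 2.
fn triangle_lagrange_point_count(p: usize) -> usize {
    (p + 1) * (p + 2) / 2
}

fn main() {
    // A triangle always has 3 topological vertices, while the number of
    // geometry points grows with the polynomial degree.
    for (degree, expected_points) in [(1, 3), (2, 6), (3, 10)] {
        assert_eq!(triangle_lagrange_point_count(degree), expected_points);
    }
}
```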
diff --git a/src/grid/single_element_grid/parallel.rs b/src/grid/single_element_grid/parallel.rs
index 78e552c5..00adb1a6 100644
--- a/src/grid/single_element_grid/parallel.rs
+++ b/src/grid/single_element_grid/parallel.rs
@@ -1,383 +1,66 @@
-//! Parallel grid builder
-use crate::element::reference_cell;
-use crate::grid::parallel_grid::ParallelGrid;
+use crate::grid::parallel_grid::ParallelGridBuilder;
 use crate::grid::single_element_grid::{SingleElementGrid, SingleElementGridBuilder};
-use crate::traits::grid::ParallelBuilder;
-use crate::traits::types::{Ownership, ReferenceCellType};
-use mpi::{
-    request::WaitGuard,
-    topology::Communicator,
-    traits::{Buffer, Destination, Equivalence, Source},
-};
+use crate::traits::types::ReferenceCellType;
+use mpi::traits::{Buffer, Equivalence};
 use num::Float;
 use rlst::{
-    dense::array::views::ArrayViewMut, rlst_dynamic_array2, Array, BaseArray, MatrixInverse,
-    RandomAccessMut, RlstScalar, VectorContainer,
+    dense::array::views::ArrayViewMut, Array, BaseArray, MatrixInverse, RlstScalar, VectorContainer,
 };
 use std::collections::HashMap;
 
-impl + Equivalence> ParallelBuilder
-    for SingleElementGridBuilder
+impl + Equivalence> ParallelGridBuilder
+    for SingleElementGridBuilder<3, T>
 where
     for<'a> Array, 2>, 2>, 2>: MatrixInverse,
     [T]: Buffer,
 {
-    type ParallelGridType<'a, C: Communicator + 'a> = ParallelGrid<'a, C, SingleElementGrid>;
-    fn create_parallel_grid<'a, C: Communicator>(
-        self,
-        comm: &'a C,
-        cell_owners: &HashMap,
-    ) -> ParallelGrid<'a, C, SingleElementGrid> {
-        let rank = comm.rank() as usize;
-        let size = comm.size() as usize;
-
-        let npts = self.point_indices_to_ids.len();
-        let ncells = self.cell_indices_to_ids.len();
-
-        // data used in computation
-        let mut vertex_owners = vec![(-1, 0); npts];
-        let mut vertex_counts = vec![0; size];
-        let mut cell_indices_per_proc = vec![vec![]; size];
-        let mut vertex_indices_per_proc = vec![vec![]; size];
-        let mut edge_owners = HashMap::new();
-        let mut edge_counts = vec![0; size];
-        let mut edges_per_proc = vec![vec![]; size];
-
-        // data to send to other processes
-        let mut points_per_proc = vec![vec![]; size];
-        let mut cells_per_proc = vec![vec![]; size];
-        let mut point_ids_per_proc = vec![vec![]; size];
-        let mut cell_ids_per_proc = vec![vec![]; size];
-        let mut vertex_owners_per_proc = vec![vec![]; size];
-        let mut vertex_local_indices_per_proc = vec![vec![]; size];
-        let mut edge_vertices0_per_proc = vec![vec![]; size];
-        let mut edge_vertices1_per_proc = vec![vec![]; size];
-        let mut edge_owners_per_proc = vec![vec![]; size];
-        let mut edge_local_indices_per_proc = vec![vec![]; size];
-        let mut cell_owners_per_proc = vec![vec![]; size];
-        let mut cell_local_indices_per_proc = vec![vec![]; size];
-
-        for (index, id) in self.cell_indices_to_ids.iter().enumerate() {
-            let owner = cell_owners[id];
-            for v in &self.cells[self.points_per_cell * index..self.points_per_cell * (index + 1)] {
-                if vertex_owners[*v].0 == -1 {
-                    vertex_owners[*v] = (owner as i32, vertex_counts[owner]);
-                }
-                if !vertex_indices_per_proc[owner].contains(v) {
-                    vertex_indices_per_proc[owner].push(*v);
-                    vertex_owners_per_proc[owner].push(vertex_owners[*v].0 as usize);
-                    vertex_local_indices_per_proc[owner].push(vertex_owners[*v].1);
-                    for i in 0..GDIM {
-                        points_per_proc[owner].push(self.points[v * GDIM + i])
-                    }
-                    point_ids_per_proc[owner].push(self.point_indices_to_ids[*v]);
-                    vertex_counts[owner] += 1;
-                }
-            }
-        }
-
-        let ref_conn = &reference_cell::connectivity(self.element_data.0)[1];
-
-        for (index, id) in self.cell_indices_to_ids.iter().enumerate() {
-            let owner = cell_owners[&id];
-            for e in ref_conn {
-                let v0 = self.cells[ref_conn.len() * index + e[0][0]];
-                let v1 = self.cells[ref_conn.len() * index + e[0][1]];
-                let edge = if v0 < v1 { [v0, v1] } else { [v1, v0] };
-                let local_v0 = vertex_indices_per_proc[owner]
-                    .iter()
-                    .position(|&r| r == v0)
-                    .unwrap();
-                let local_v1 = vertex_indices_per_proc[owner]
-                    .iter()
-                    .position(|&r| r == v1)
-                    .unwrap();
-                let local_edge = if local_v0 < local_v1 {
-                    [local_v0, local_v1]
-                } else {
-                    [local_v1, local_v0]
-                };
-                if edge_owners.get_mut(&edge).is_none() {
-                    edge_owners.insert(edge, (owner, edge_counts[owner]));
-                    edges_per_proc[owner].push(edge);
-                    edge_vertices0_per_proc[owner].push(local_edge[0]);
-                    edge_vertices1_per_proc[owner].push(local_edge[1]);
-                    edge_owners_per_proc[owner].push(edge_owners[&edge].0);
-                    edge_local_indices_per_proc[owner].push(edge_owners[&edge].1);
-                    edge_counts[owner] += 1;
-                }
-            }
-        }
-
-        for index in 0..ncells {
-            for p in 0..size {
-                for v in
-                    &self.cells[self.points_per_cell * index..self.points_per_cell * (index + 1)]
-                {
-                    if vertex_indices_per_proc[p].contains(v) {
-                        cell_indices_per_proc[p].push(index);
-                        break;
-                    }
-                }
-            }
-        }
+    type G = SingleElementGrid;
+    type ExtraCellInfo = ();
 
-        for p in 0..size {
-            for index in &cell_indices_per_proc[p] {
-                let id = self.cell_indices_to_ids[*index];
-                for v in
-                    &self.cells[self.points_per_cell * index..self.points_per_cell * (index + 1)]
-                {
-                    if !vertex_indices_per_proc[p].contains(v) {
-                        vertex_indices_per_proc[p].push(*v);
-                        vertex_owners_per_proc[p].push(vertex_owners[*v].0 as usize);
-                        vertex_local_indices_per_proc[p].push(vertex_owners[*v].1);
-                        for i in 0..GDIM {
-                            points_per_proc[p].push(self.points[v * GDIM + i])
-                        }
-                        point_ids_per_proc[p].push(self.point_indices_to_ids[*v])
-                    }
-                    cells_per_proc[p].push(
-                        vertex_indices_per_proc[p]
-                            .iter()
-                            .position(|&r| r == *v)
-                            .unwrap(),
-                    );
-                }
+    fn new_extra_cell_info(&self) {}
 
-                for e in ref_conn {
-                    let v0 = self.cells[ref_conn.len() * index + e[0][0]];
-                    let v1 = self.cells[ref_conn.len() * index + e[0][1]];
-                    let edge = if v0 < v1 { [v0, v1] } else { [v1, v0] };
-                    let local_v0 = vertex_indices_per_proc[p]
-                        .iter()
-                        .position(|&r| r == v0)
-                        .unwrap();
-                    let local_v1 = vertex_indices_per_proc[p]
-                        .iter()
-                        .position(|&r| r == v1)
-                        .unwrap();
-                    let local_edge = if local_v0 < local_v1 {
-                        [local_v0, local_v1]
-                    } else {
-                        [local_v1, local_v0]
-                    };
-                    if !edges_per_proc[p].contains(&edge) {
-                        edges_per_proc[p].push(edge);
-                        edge_vertices0_per_proc[p].push(local_edge[0]);
-                        edge_vertices1_per_proc[p].push(local_edge[1]);
-                        edge_owners_per_proc[p].push(edge_owners[&edge].0);
-                        edge_local_indices_per_proc[p].push(edge_owners[&edge].1);
-                    }
-                }
-
-                cell_ids_per_proc[p].push(id);
-                cell_owners_per_proc[p].push(cell_owners[&id]);
-                cell_local_indices_per_proc[p].push(
-                    cell_indices_per_proc[cell_owners[&id]]
-                        .iter()
-                        .position(|&r| r == *index)
-                        .unwrap(),
-                );
-            }
-        }
-
-        mpi::request::scope(|scope| {
-            for p in 1..size {
-                let _ = WaitGuard::from(
-                    comm.process_at_rank(p as i32)
-                        .immediate_send(scope, &points_per_proc[p]),
-                );
-                let _ = WaitGuard::from(
-                    comm.process_at_rank(p as i32)
-                        .immediate_send(scope, &cells_per_proc[p]),
-                );
-                let _ = WaitGuard::from(
-                    comm.process_at_rank(p as i32)
-                        .immediate_send(scope, &point_ids_per_proc[p]),
-                );
-                let _ = WaitGuard::from(
-                    comm.process_at_rank(p as i32)
-                        .immediate_send(scope, &vertex_owners_per_proc[p]),
-                );
-                let _ = WaitGuard::from(
-                    comm.process_at_rank(p as i32)
-                        .immediate_send(scope, &vertex_local_indices_per_proc[p]),
-                );
-                let _ = WaitGuard::from(
-                    comm.process_at_rank(p as i32)
-                        .immediate_send(scope, &cell_ids_per_proc[p]),
-                );
-                let _ = WaitGuard::from(
-                    comm.process_at_rank(p as i32)
-                        .immediate_send(scope, &cell_owners_per_proc[p]),
-                );
-                let _ = WaitGuard::from(
-                    comm.process_at_rank(p as i32)
-                        .immediate_send(scope, &cell_local_indices_per_proc[p]),
-                );
-                let _ = WaitGuard::from(
-                    comm.process_at_rank(p as i32)
-                        .immediate_send(scope, &edge_owners_per_proc[p]),
-                );
-                let _ = WaitGuard::from(
-                    comm.process_at_rank(p as i32)
-                        .immediate_send(scope, &edge_local_indices_per_proc[p]),
-                );
-                let _ = WaitGuard::from(
-                    comm.process_at_rank(p as i32)
-                        .immediate_send(scope, &edge_vertices0_per_proc[p]),
-                );
-                let _ = WaitGuard::from(
-                    comm.process_at_rank(p as i32)
-                        .immediate_send(scope, &edge_vertices1_per_proc[p]),
-                );
-            }
-        });
-        self.create_internal(
-            comm,
-            &points_per_proc[rank],
-            &cells_per_proc[rank],
-            &point_ids_per_proc[rank],
-            &vertex_owners_per_proc[rank],
-            &vertex_local_indices_per_proc[rank],
-            &cell_ids_per_proc[rank],
-            &cell_owners_per_proc[rank],
-            &cell_local_indices_per_proc[rank],
-            &edge_owners_per_proc[rank],
-            &edge_local_indices_per_proc[rank],
-            &edge_vertices0_per_proc[rank],
-            &edge_vertices1_per_proc[rank],
-        )
+    fn point_indices_to_ids(&self) -> &[usize] {
+        &self.point_indices_to_ids
     }
-    fn receive_parallel_grid(
-        self,
-        comm: &C,
-        root_rank: usize,
-    ) -> ParallelGrid<'_, C, SingleElementGrid> {
-        let root_process = comm.process_at_rank(root_rank as i32);
-
-        let (points, _status) = root_process.receive_vec::();
-        let (cells, _status) = root_process.receive_vec::();
-        let (point_ids, _status) = root_process.receive_vec::();
-        let (vertex_owners, _status) = root_process.receive_vec::();
-        let (vertex_local_indices, _status) = root_process.receive_vec::();
-        let (cell_ids, _status) = root_process.receive_vec::();
-        let (cell_owners, _status) = root_process.receive_vec::();
-        let (cell_local_indices, _status) = root_process.receive_vec::();
-        let (edge_owners, _status) = root_process.receive_vec::();
-        let (edge_local_indices, _status) = root_process.receive_vec::();
-        let (edge_vertices0, _status) = root_process.receive_vec::();
-        let (edge_vertices1, _status) = root_process.receive_vec::();
-        self.create_internal(
-            comm,
-            &points,
-            &cells,
-            &point_ids,
-            &vertex_owners,
-            &vertex_local_indices,
-            &cell_ids,
-            &cell_owners,
-            &cell_local_indices,
-            &edge_owners,
-            &edge_local_indices,
-            &edge_vertices0,
-            &edge_vertices1,
-        )
+    fn points(&self) -> &[T] {
+        &self.points
     }
-}
-
-impl> SingleElementGridBuilder
-where
-    for<'a> Array, 2>, 2>, 2>: MatrixInverse,
-{
-    #[allow(clippy::too_many_arguments)]
-    fn create_internal<'a, C: Communicator>(
-        self,
-        comm: &'a C,
-        points: &[T],
+    fn cell_indices_to_ids(&self) -> &[usize] {
+        &self.cell_indices_to_ids
+    }
+    fn cell_points(&self, index: usize) -> &[usize] {
+        &self.cells[self.points_per_cell * index..self.points_per_cell * (index + 1)]
+    }
+    fn cell_vertices(&self, index: usize) -> &[usize] {
+        &self.cells
+            [self.points_per_cell * index..self.points_per_cell * index + self.vertices_per_cell]
+    }
+    fn cell_type(&self, _index: usize) -> ReferenceCellType {
+        self.element_data.0
+    }
+    fn create_serial_grid(
+        &self,
+        points: Array, 2>, 2>,
         cells: &[usize],
-        point_ids: &[usize],
-        vertex_owners: &[usize],
-        vertex_local_indices: &[usize],
-        cell_ids: &[usize],
-        cell_owners: &[usize],
-        cell_local_indices: &[usize],
-        edge_owners: &[usize],
-        edge_local_indices: &[usize],
-        edge_vertices0: &[usize],
-        edge_vertices1: &[usize],
-    ) -> ParallelGrid<'a, C, SingleElementGrid> {
-        let rank = comm.rank() as usize;
-        let npts = point_ids.len();
-        let ncells = cell_ids.len();
-
-        let mut coordinates = rlst_dynamic_array2!(T, [npts, 3]);
-        for i in 0..npts {
-            for j in 0..3 {
-                *coordinates.get_mut([i, j]).unwrap() = points[i * 3 + j];
-            }
-        }
-
-        let mut point_ids_to_indices = HashMap::new();
-        for (index, id) in point_ids.iter().enumerate() {
-            point_ids_to_indices.insert(*id, index);
-        }
-        let mut cell_ids_to_indices = HashMap::new();
-        for (index, id) in cell_ids.iter().enumerate() {
-            cell_ids_to_indices.insert(*id, index);
-        }
-
-        let mut cell_ownership = vec![];
-        for index in 0..ncells {
-            cell_ownership.push(if cell_owners[index] == rank {
-                Ownership::Owned
-            } else {
-                Ownership::Ghost(cell_owners[index], cell_local_indices[index])
-            });
-        }
-        let mut vertex_ownership = vec![];
-        for index in 0..npts {
-            vertex_ownership.push(if vertex_owners[index] == rank {
-                Ownership::Owned
-            } else {
-                Ownership::Ghost(vertex_owners[index], vertex_local_indices[index])
-            });
-        }
-
-        let edge_ownership = edge_owners
-            .iter()
-            .zip(edge_local_indices)
-            .map(|(i, j)| {
-                if *i == rank {
-                    Ownership::Owned
-                } else {
-                    Ownership::Ghost(*i, *j)
-                }
-            })
-            .collect::>();
-        let edges = edge_vertices0
-            .iter()
-            .zip(edge_vertices1)
-            .map(|(i, j)| [*i, *j])
-            .collect::>();
-
-        let serial_grid = SingleElementGrid::new(
-            coordinates,
+        point_indices_to_ids: Vec,
+        point_ids_to_indices: HashMap,
+        cell_indices_to_ids: Vec,
+        cell_ids_to_indices: HashMap,
+        edge_ids: HashMap<[usize; 2], usize>,
+        _extra_cell_info: &(),
+    ) -> Self::G {
+        SingleElementGrid::new(
+            points,
             cells,
             self.element_data.0,
             self.element_data.1,
-            point_ids.to_vec(),
+            point_indices_to_ids,
             point_ids_to_indices,
-            cell_ids.to_vec(),
+            cell_indices_to_ids,
             cell_ids_to_indices,
-            Some(cell_ownership),
-            Some(edges),
-            Some(edge_ownership),
-            Some(vertex_ownership),
-        );
-
-        ParallelGrid::new(comm, serial_grid)
+            Some(edge_ids),
+        )
     }
 }
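The `cell_points`/`cell_vertices` pair implemented above slices one flat connectivity array: each cell occupies `points_per_cell` consecutive entries and its vertices are the first `vertices_per_cell` of them. A standalone sketch of that indexing (illustrative only; the type and field names are not the crate's API):

```rust
/// Flat cell-to-point connectivity with `points_per_cell` entries per cell.
struct Connectivity {
    cells: Vec<usize>,
    points_per_cell: usize,
    vertices_per_cell: usize,
}

impl Connectivity {
    /// All geometry points of cell `index`.
    fn cell_points(&self, index: usize) -> &[usize] {
        &self.cells[self.points_per_cell * index..self.points_per_cell * (index + 1)]
    }

    /// The topological vertices of cell `index` (the leading entries of the slice).
    fn cell_vertices(&self, index: usize) -> &[usize] {
        &self.cells
            [self.points_per_cell * index..self.points_per_cell * index + self.vertices_per_cell]
    }
}

fn main() {
    // Two degree-2 triangles: 6 points per cell, of which the first 3 are vertices.
    let conn = Connectivity {
        cells: vec![0, 1, 2, 3, 4, 5, 1, 6, 2, 7, 8, 9],
        points_per_cell: 6,
        vertices_per_cell: 3,
    };
    assert_eq!(conn.cell_points(1), &[1, 6, 2, 7, 8, 9][..]);
    assert_eq!(conn.cell_vertices(1), &[1, 6, 2][..]);
}
```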
diff --git a/src/grid/single_element_grid/topology.rs b/src/grid/single_element_grid/topology.rs
index 176f012e..c2fcdef1 100644
--- a/src/grid/single_element_grid/topology.rs
+++ b/src/grid/single_element_grid/topology.rs
@@ -32,6 +32,8 @@ pub struct SingleElementTopology {
     entity_types: Vec,
     vertex_indices_to_ids: Vec,
     vertex_ids_to_indices: HashMap,
+    edge_indices_to_ids: Vec,
+    edge_ids_to_indices: HashMap,
     cell_indices_to_ids: Vec,
     cell_ids_to_indices: HashMap,
     cell_types: [ReferenceCellType; 1],
@@ -47,6 +49,7 @@ impl SingleElementTopology {
         cell_type: ReferenceCellType,
         point_indices_to_ids: &[usize],
         grid_cell_indices_to_ids: &[usize],
+        edge_ids: Option>,
     ) -> Self {
         let size = reference_cell::entity_counts(cell_type)[0];
         let ncells = cells_input.len() / size;
@@ -82,9 +85,9 @@ impl SingleElementTopology {
             for v in cell {
                 if !vertices.contains(v) {
                     entities_to_cells[0].push(vec![]);
-                    vertices.push(*v);
                     vertex_indices_to_ids.push(point_indices_to_ids[*v]);
-                    vertex_ids_to_indices.insert(point_indices_to_ids[*v], *v);
+                    vertex_ids_to_indices.insert(point_indices_to_ids[*v], vertices.len());
+                    vertices.push(*v);
                 }
                 row.push(vertices.iter().position(|&r| r == *v).unwrap());
             }
@@ -102,7 +105,52 @@ impl SingleElementTopology {
         entities_to_vertices[0] = (0..vertices.len()).map(|i| vec![i]).collect::>();
 
-        for d in 1..dim {
+        let mut edge_indices = HashMap::new();
+        let mut edge_indices_to_ids = vec![];
+        let mut edge_ids_to_indices = HashMap::new();
+        if let Some(e) = &edge_ids {
+            for (edge_i, (i, j)) in e.iter().enumerate() {
+                let mut v0 = vertex_ids_to_indices[&i[0]];
+                let mut v1 = vertex_ids_to_indices[&i[1]];
+                if v0 > v1 {
+                    std::mem::swap(&mut v0, &mut v1);
+                }
+                edge_indices.insert((v0, v1), edge_i);
+                edge_indices_to_ids.push(*j);
+                edge_ids_to_indices.insert(*j, edge_i);
+                entities_to_vertices[1].push(vec![v0, v1]);
+                entities_to_cells[1].push(vec![]);
+            }
+        }
+        let ref_conn = &reference_cell::connectivity(cell_type)[1];
+        for cell_i in 0..ncells {
+            for (local_index, rc) in ref_conn.iter().enumerate() {
+                let cell = &cells_to_entities[0][cell_i];
+                let mut first = cell[rc[0][0]];
+                let mut second = cell[rc[0][1]];
+                if first > second {
+                    std::mem::swap(&mut first, &mut second);
+                }
+                if let Some(edge_index) = edge_indices.get(&(first, second)) {
+                    cells_to_entities[1][cell_i].push(*edge_index);
+                    entities_to_cells[1][*edge_index]
+                        .push(CellLocalIndexPair::new(cell_i, local_index));
+                } else {
+                    if edge_ids.is_some() {
+                        panic!("Missing id for edge");
+                    }
+                    let id = entities_to_vertices[1].len();
+                    edge_indices.insert((first, second), id);
+                    edge_indices_to_ids.push(id);
+                    edge_ids_to_indices.insert(id, id);
+                    cells_to_entities[1][cell_i].push(entities_to_vertices[1].len());
+                    entities_to_cells[1].push(vec![CellLocalIndexPair::new(cell_i, local_index)]);
+                    entities_to_vertices[1].push(vec![first, second]);
+                }
+            }
+        }
+
+        for d in 2..dim {
             let mut c_to_e = vec![];
             let ref_conn = &reference_cell::connectivity(cell_type)[d];
             for (cell_i, cell) in cells_to_entities[0].iter().enumerate() {
@@ -139,6 +187,8 @@ impl SingleElementTopology {
             entities_to_cells,
             entity_types,
             vertex_indices_to_ids,
+            edge_ids_to_indices,
+            edge_indices_to_ids,
             vertex_ids_to_indices,
             cell_indices_to_ids,
             cell_ids_to_indices,
@@ -241,6 +291,12 @@ impl Topology for SingleElementTopology {
             panic!("Vertex with id {} not found", id);
         }
     }
+    fn edge_id_to_index(&self, id: usize) -> usize {
+        self.edge_ids_to_indices[&id]
+    }
+    fn edge_index_to_id(&self, index: usize) -> usize {
+        self.edge_indices_to_ids[index]
+    }
     fn cell_id_to_index(&self, id: usize) -> usize {
         self.cell_ids_to_indices[&id]
     }
@@ -266,6 +322,7 @@ mod test {
             ReferenceCellType::Triangle,
             &[0, 1, 2, 3],
             &[0, 1],
+            None,
         )
     }
 
diff --git a/src/grid/traits.rs b/src/grid/traits.rs
index 5595cd01..375ea74c 100644
--- a/src/grid/traits.rs
+++ b/src/grid/traits.rs
@@ -66,6 +66,12 @@ pub trait Topology {
     /// Get the id of a vertex from its index
     fn vertex_index_to_id(&self, index: usize) -> usize;
 
+    /// Get the id of an edge from its index
+    fn edge_index_to_id(&self, index: usize) -> usize;
+
+    /// Get the index of an edge from its id
+    fn edge_id_to_index(&self, index: usize) -> usize;
+
     /// Get the id of a cell from its index
     fn cell_index_to_id(&self, index: Self::IndexType) -> usize;