Skip to content

Commit

Permalink
First batched assembler (#122)
Browse files Browse the repository at this point in the history
* format data from bempp-cl to take up less space

* start working on batched assembly

* working on batched assembly

* Working on batched assembly

* clippy

* Use rayon in assembly

* move memory assignment, evaluation of basis functions out of function

* assemble the batches before moving on to next colour (!)

* start planning out singular assembly

* formatting

* add example for timing assembly

* cty

* update parallel grid

* batch the singular terms too

* only make raw output once

* run tests with --release on CI too
  • Loading branch information
mscroggs authored Oct 10, 2023
1 parent c4912c8 commit 4590040
Show file tree
Hide file tree
Showing 13 changed files with 1,015 additions and 1,051 deletions.
4 changes: 4 additions & 0 deletions .github/workflows/run-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,12 @@ jobs:

- name: Run unit tests
run: cargo test --lib --features "strict"
- name: Run unit tests (release)
run: cargo test --lib --release --features "strict"
- name: Run unit tests (with mpi enabled)
run: cargo test --lib --features "mpi,strict"
- name: Run unit tests (release with mpi enabled)
run: cargo test --lib --release --features "mpi,strict"
- name: Run tests
run: cargo test --examples --release --features "mpi,strict"
- name: Run examples
Expand Down
5 changes: 5 additions & 0 deletions .github/workflows/run-weekly-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,10 +36,15 @@ jobs:
- name: Build rust library (release with mpi)
run: cargo build --release --features "strict,mpi"


- name: Run unit tests
run: cargo test --lib --features "strict"
- name: Run unit tests (release)
run: cargo test --lib --release --features "strict"
- name: Run unit tests (with mpi enabled)
run: cargo test --lib --features "mpi,strict"
- name: Run unit tests (release with mpi enabled)
run: cargo test --lib --release --features "mpi,strict"
- name: Run tests
run: cargo test --examples --release --features "mpi,strict"
- name: Run examples
Expand Down
1 change: 1 addition & 0 deletions bem/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -29,3 +29,4 @@ approx = "0.5"
itertools = "0.10"
mpi = { version = "0.6.*", optional = true }
num = "0.4"
rayon = "1.7"
44 changes: 44 additions & 0 deletions bem/examples/assembly_timing.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
use bempp_bem::assembly::{assemble_batched, BoundaryOperator, PDEType};
use bempp_bem::function_space::SerialFunctionSpace;
use bempp_element::element::create_element;
use bempp_grid::shapes::regular_sphere;
use bempp_tools::arrays::Array2D;
use bempp_traits::bem::DofMap;
use bempp_traits::bem::FunctionSpace;
use bempp_traits::cell::ReferenceCellType;
use bempp_traits::element::{Continuity, ElementFamily};
use num::complex::Complex;
use std::time::Instant;

/// Timing example: assemble a Helmholtz single-layer operator with the
/// batched assembler on a sequence of increasingly refined spheres, and
/// print `<global dof count> <assembly time in ms>` for each refinement.
fn main() {
    for i in 0..5 {
        // Setup: mesh, a degree-0 discontinuous Lagrange element, and the
        // function space / output matrix sized by the global DOF count.
        let grid = regular_sphere(i);
        let element = create_element(
            ElementFamily::Lagrange,
            ReferenceCellType::Triangle,
            0,
            Continuity::Discontinuous,
        );
        let space = SerialFunctionSpace::new(&grid, &element);
        let mut matrix = Array2D::<Complex<f64>>::new((
            space.dofmap().global_size(),
            space.dofmap().global_size(),
        ));

        // Start the clock only after setup so the reported time measures the
        // assembly itself, not grid/space construction (this is an
        // *assembly* timing example).
        let now = Instant::now();
        assemble_batched(
            &mut matrix,
            BoundaryOperator::SingleLayer,
            PDEType::Helmholtz(5.0),
            &space,
            &space,
        );

        println!(
            "{} {}",
            space.dofmap().global_size(),
            now.elapsed().as_millis()
        )
    }
}
92 changes: 92 additions & 0 deletions bem/src/assembly.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
pub mod batched;
pub mod dense;
use crate::function_space::SerialFunctionSpace;
use crate::green;
use crate::green::Scalar;
use bempp_tools::arrays::Array2D;
Expand Down Expand Up @@ -112,6 +114,96 @@ pub fn assemble_dense<'a, T: Scalar>(
},
};
}

/// Assemble an operator into a dense matrix using batched parallelisation.
///
/// The `(pde, operator)` pair selects which Green's function kernel is handed
/// to [`batched::assemble`], together with two boolean flags. Mirroring the
/// kernel choice (plain / dx / dy), the flags are presumably "needs a
/// derivative on the trial side" and "needs a derivative on the test side" —
/// confirm against `batched::assemble`'s signature.
///
/// # Panics
/// Panics with "Invalid operator" for unsupported combinations (currently the
/// hypersingular operators, which the batched assembler does not yet handle).
pub fn assemble_batched<'a, T: Scalar + Copy + Sync>(
    // TODO: output should be `&mut impl ArrayAccess2D` once such a trait exists
    output: &mut Array2D<T>,
    operator: BoundaryOperator,
    pde: PDEType,
    trial_space: &SerialFunctionSpace<'a>,
    test_space: &SerialFunctionSpace<'a>,
) {
    // Single flat dispatch on the (PDE, operator) pair.
    match (pde, operator) {
        (PDEType::Laplace, BoundaryOperator::SingleLayer) => batched::assemble(
            output,
            &green::LaplaceGreenKernel {},
            false,
            false,
            trial_space,
            test_space,
        ),
        (PDEType::Laplace, BoundaryOperator::DoubleLayer) => batched::assemble(
            output,
            &green::LaplaceGreenDyKernel {},
            false,
            true,
            trial_space,
            test_space,
        ),
        (PDEType::Laplace, BoundaryOperator::AdjointDoubleLayer) => batched::assemble(
            output,
            &green::LaplaceGreenDxKernel {},
            true,
            false,
            trial_space,
            test_space,
        ),
        (PDEType::Helmholtz(k), BoundaryOperator::SingleLayer) => batched::assemble(
            output,
            &green::HelmholtzGreenKernel { k },
            false,
            false,
            trial_space,
            test_space,
        ),
        (PDEType::Helmholtz(k), BoundaryOperator::DoubleLayer) => batched::assemble(
            output,
            &green::HelmholtzGreenDyKernel { k },
            false,
            true,
            trial_space,
            test_space,
        ),
        (PDEType::Helmholtz(k), BoundaryOperator::AdjointDoubleLayer) => batched::assemble(
            output,
            &green::HelmholtzGreenDxKernel { k },
            true,
            false,
            trial_space,
            test_space,
        ),
        // TODO: hypersingular operators (laplace_hypersingular_assemble /
        // helmholtz_hypersingular_assemble) are not yet implemented for the
        // batched assembler; they fall through to the panic below.
        _ => panic!("Invalid operator"),
    }
}

#[cfg(test)]
mod test {
use crate::assembly::dense;
Expand Down
Loading

0 comments on commit 4590040

Please sign in to comment.