diff --git a/fmm/src/fmm.rs b/fmm/src/fmm.rs index 5b453511..e2fa2614 100644 --- a/fmm/src/fmm.rs +++ b/fmm/src/fmm.rs @@ -770,22 +770,112 @@ where U: Scalar + Float + Default, FmmDataLinear: SourceTranslation + FieldTranslation + TargetTranslation, { - fn upward_pass(&self, time: Option) -> Option { - self.p2m(); + fn upward_pass(&self, time: bool) -> Option { + match time { + true => { + let mut times = TimeDict::default(); + // Particle to Multipole + let start = Instant::now(); + self.p2m(); + times.insert("p2m".to_string(), start.elapsed().as_millis()); - let depth = self.fmm.tree().get_depth(); - for level in (1..=depth).rev() { - self.m2m(level) + // Multipole to Multipole + let depth = self.fmm.tree().get_depth(); + let start = Instant::now(); + for level in (1..=depth).rev() { + self.m2m(level) + } + times.insert("m2m".to_string(), start.elapsed().as_millis()); + Some(times) + } + false => { + // Particle to Multipole + self.p2m(); + + // Multipole to Multipole + let depth = self.fmm.tree().get_depth(); + for level in (1..=depth).rev() { + self.m2m(level) + } + None + } } - None } - fn downward_pass(&self, time: Option) -> Option { - None + fn downward_pass(&self, time: bool) -> Option { + let depth = self.fmm.tree().get_depth(); + + match time { + true => { + let mut times = TimeDict::default(); + let mut l2l_time = 0; + let mut m2l_time = 0; + + for level in 2..=depth { + if level < depth { + let start = Instant::now(); + self.l2l(level); + l2l_time += start.elapsed().as_millis(); + } + + let start = Instant::now(); + self.m2l(level); + m2l_time += start.elapsed().as_millis(); + } + + times.insert("l2l".to_string(), l2l_time); + times.insert("m2l".to_string(), m2l_time); + + // Leaf level computations + let start = Instant::now(); + self.p2l(); + times.insert("p2l".to_string(), start.elapsed().as_millis()); + + // Sum all potential contributions + let start = Instant::now(); + self.m2p(); + times.insert("m2p".to_string(), start.elapsed().as_millis()); + + let start = Instant::now(); + self.p2p(); + times.insert("p2p".to_string(), start.elapsed().as_millis()); + + let start = Instant::now(); + self.l2p(); + times.insert("l2p".to_string(), start.elapsed().as_millis()); + + Some(times) + } + false => { + for level in 2..=depth { + if level > 2 { + self.l2l(level); + } + self.m2l(level); + } + // Leaf level computations + self.p2l(); + + // Sum all potential contributions + self.m2p(); + self.p2p(); + self.l2p(); + + None + } + } } - fn run(&self, time: Option) -> Option { - None + fn run(&self, time: bool) -> Option { + let t1 = self.upward_pass(time); + let t2 = self.downward_pass(time); + + if let (Some(mut t1), Some(t2)) = (t1, t2) { + t1.extend(t2); + Some(t1) + } else { + None + } } } @@ -795,9 +885,9 @@ where U: Scalar + Float + Default, FmmData: SourceTranslation + FieldTranslation + TargetTranslation, { - fn upward_pass(&self, time: Option) -> Option { + fn upward_pass(&self, time: bool) -> Option { match time { - Some(true) => { + true => { let mut times = TimeDict::default(); // Particle to Multipole let start = Instant::now(); @@ -813,7 +903,7 @@ where times.insert("m2m".to_string(), start.elapsed().as_millis()); Some(times) } - Some(false) | None => { + false => { // Particle to Multipole self.p2m(); @@ -827,11 +917,11 @@ where } } - fn downward_pass(&self, time: Option) -> Option { + fn downward_pass(&self, time: bool) -> Option { let depth = self.fmm.tree().get_depth(); match time { - Some(true) => { + true => { let mut times = TimeDict::default(); let mut l2l_time = 
0; let mut m2l_time = 0; @@ -871,7 +961,7 @@ where Some(times) } - Some(false) | None => { + false => { for level in 2..=depth { if level > 2 { self.l2l(level); @@ -891,7 +981,7 @@ where } } - fn run(&self, time: Option) -> Option { + fn run(&self, time: bool) -> Option { let t1 = self.upward_pass(time); let t2 = self.downward_pass(time); @@ -967,7 +1057,7 @@ mod test { let datatree = FmmData::new(fmm, &charge_dict); // Run the experiment - datatree.run(None); + datatree.run(false); // Test that direct computation is close to the FMM. let leaf = &datatree.fmm.tree.get_keys(depth).unwrap()[0]; @@ -1064,7 +1154,7 @@ mod test { let datatree = FmmData::new(fmm, &charge_dict); // Run the experiment - datatree.run(Some(true)); + datatree.run(true); // Test that direct computation is close to the FMM. let leaf = &datatree.fmm.tree.get_keys(depth).unwrap()[0]; @@ -1141,15 +1231,7 @@ mod test { // Form charge dict, matching charges with their associated global indices let charge_dict = build_charge_dict(&global_idxs[..], &charges[..]); - let s = Instant::now(); let datatree = FmmData::new(fmm, &charge_dict); - println!("data tree setup old {:?}", s.elapsed()); - datatree.run(Some(true)); - - let s = Instant::now(); - datatree.upward_pass(None); - println!("linear p2m {:?}", s.elapsed()); - assert!(false); let leaf = &datatree.fmm.tree.get_keys(depth).unwrap()[0]; @@ -1227,7 +1309,7 @@ mod test { let datatree = FmmData::new(fmm, &charge_dict); - datatree.run(None); + datatree.run(false); let leaf = &datatree.fmm.tree.get_keys(depth).unwrap()[0]; @@ -1279,13 +1361,12 @@ mod test { let global_idxs = (0..npoints).collect_vec(); let charges = vec![1.0; npoints]; - let order = 6; + let order = 9; let alpha_inner = 1.05; let alpha_outer = 2.95; let adaptive = false; let ncrit = 150; - // TODO: There is a bug for when boxes are empty ... let depth = 5; let kernel = Laplace3dKernel::default(); @@ -1311,12 +1392,8 @@ mod test { println!("data tree setup {:?}", s.elapsed()); let s = Instant::now(); - datatree.p2m(); - for level in (1..=depth).rev() { - datatree.m2m(level) - } - - println!("linear upward pass {:?}", s.elapsed()); + let times: Option> = datatree.run(true); + println!("linear upward pass {:?} {:?}", s.elapsed(), times.unwrap()); let kernel = Laplace3dKernel::default(); @@ -1346,17 +1423,14 @@ mod test { let new_leaf = datatree.fmm.tree().get_all_leaves().unwrap()[idx]; let new_key = datatree.fmm.tree().get_all_keys().unwrap()[idx]; - println!("old {:?} new {:?} keys", old_key, new_key); + // println!("old {:?} new {:?} keys", old_key, new_key); let (l, r) = datatree.charge_index_pointer[idx]; // let new_points = &datatree.fmm.tree().get_all_coordinates().unwrap()[l*3..r*3]; - let s = Instant::now(); - old_datatree.p2m(); - for level in (1..=depth).rev() { - old_datatree.m2m(level) - } - println!("old upward pass {:?}", s.elapsed()); + // let s = Instant::now(); + // let times = old_datatree.run(true); + // println!("old upward pass {:?} {:?}", s.elapsed(), times.unwrap()); // Check potentials let midx = datatree.fmm.tree().key_to_index.get(&new_key).unwrap(); diff --git a/fmm/src/fmm_hashmap.rs b/fmm/src/fmm_hashmap.rs new file mode 100644 index 00000000..e69de29b diff --git a/fmm/src/fmm_linear.rs b/fmm/src/fmm_linear.rs index 10a95a58..5b2e4748 100644 --- a/fmm/src/fmm_linear.rs +++ b/fmm/src/fmm_linear.rs @@ -1,10 +1,13 @@ +//! kiFMM based on simple linear data structures that minimises memory allocations, maximises cache re-use. 
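+//!
+//! Intended usage, as a rough sketch only (the constructor and `run` call follow
+//! the tests elsewhere in this diff; `FmmDataLinear::new` is assumed to mirror
+//! `FmmData::new` and is not a stable API):
+//!
+//! ```ignore
+//! // Match charges with their global indices, then attach them to the operator.
+//! let charge_dict = build_charge_dict(&global_idxs[..], &charges[..]);
+//! let datatree = FmmDataLinear::new(fmm, &charge_dict);
+//!
+//! // `run(true)` times each operator (p2m, m2m, m2l, l2l, p2l, m2p, p2p, l2p)
+//! // and returns `Some(TimeDict)` with timings in milliseconds; `run(false)`
+//! // skips timing and returns `None`.
+//! let times = datatree.run(true);
+//! ```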
use std::{ collections::HashMap, ops::{Deref, DerefMut}, sync::{Arc, Mutex, RwLock}, }; - +use num::Zero; use bempp_tools::Array3D; +use cauchy::c64; +use fftw::{plan::{C2RPlan64, R2CPlan64, C2RPlan, R2CPlan}, types::Flag, array::AlignedVec}; use itertools::Itertools; use num::{Complex, Float}; use rayon::prelude::*; @@ -25,7 +28,7 @@ use bempp_traits::{ }; use bempp_tree::types::{morton::MortonKey, single_node::SingleNodeTree}; -use crate::types::{FmmData, FmmDataLinear, KiFmm, KiFmmLinear, SendPtr, SendPtrMut}; +use crate::types::{FmmData, FmmDataLinear, KiFmm, KiFmmLinear, SendPtr, SendPtrMut, SendPtrMutIter}; use rlst::{ algorithms::{linalg::DenseMatrixLinAlgBuilder, traits::svd::Svd}, common::traits::*, @@ -192,7 +195,45 @@ where Dynamic, >, { - fn l2l(&self, level: u64) {} + fn l2l<'a>(&self, level: u64) { + if let Some(sources) = self.fmm.tree().get_keys(level) { + let ncoeffs = self.fmm.m2l.ncoeffs(self.fmm.order); + + let nsources = sources.len(); + let min = &sources[0]; + let max = &sources[nsources - 1]; + let min_idx = self.fmm.tree().key_to_index.get(min).unwrap(); + let max_idx = self.fmm.tree().key_to_index.get(max).unwrap(); + + let locals = &self.locals[min_idx * ncoeffs..(max_idx + 1) * ncoeffs]; + + let nsiblings = 8; + let mut max_chunk_size = 8_i32.pow((level - 1).try_into().unwrap()) as usize; + + if max_chunk_size > P2M_MAX_CHUNK_SIZE { + max_chunk_size = P2M_MAX_CHUNK_SIZE; + } + let chunk_size = find_chunk_size(nsources, max_chunk_size); + locals + .par_chunks_exact(nsiblings * ncoeffs*chunk_size) + .zip(self.level_multipoles[(level + 1) as usize].par_chunks_exact(chunk_size)) + .for_each(|(multipole_chunk, parent)| { + + unsafe { + let tmp = rlst_pointer_mat!['a, V, multipole_chunk.as_ptr(), (ncoeffs*nsiblings, chunk_size), (1, ncoeffs*nsiblings)]; + let tmp = self.fmm.l2l.dot(&tmp).eval(); + + for i in 0..chunk_size { + let mut ptr = parent[i].raw; + for j in 0..ncoeffs { + *ptr += tmp.data()[(i*ncoeffs)+j]; + ptr = ptr.add(1) + } + } + } + }) + } + } fn m2p<'a>(&self) {} @@ -233,6 +274,359 @@ where let Some(targets) = self.fmm.tree().get_keys(level) else { return; }; + // Form signals to use for convolution first + let n = 2 * self.fmm.order - 1; + let ntargets = targets.len(); + let ncoeffs = self.fmm.m2l.ncoeffs(self.fmm.order); + + // Pad the signal + let &(m, n, o) = &(n, n, n); + + let p = m + 1; + let q = n + 1; + let r = o + 1; + let size = p * q * r; + let size_real = p * q * (r / 2 + 1); + let pad_size = (p - m, q - n, r - o); + let pad_index = (p - m, q - n, r - o); + // let mut padded_signals = rlst_col_vec![U, size * ntargets]; + let mut padded_signals = vec![U::zero(); size*ntargets]; + + let padded_signals_chunks = padded_signals.par_chunks_exact_mut(size); + + let ntargets = targets.len(); + let min = &targets[0]; + let max = &targets[ntargets - 1]; + let min_idx = self.fmm.tree().key_to_index.get(min).unwrap(); + let max_idx = self.fmm.tree().key_to_index.get(max).unwrap(); + + let multipoles = &self.multipoles[min_idx * ncoeffs..(max_idx + 1) * ncoeffs]; + + let multipoles_chunks = multipoles.par_chunks_exact(ncoeffs); + + padded_signals_chunks + .zip(multipoles_chunks) + .for_each(|(padded_signal, multipole)| { + let signal = self.fmm.m2l.compute_signal(self.fmm.order, multipole); + + let mut tmp = pad3(&signal, pad_size, pad_index); + + padded_signal.copy_from_slice(tmp.get_data()); + }); + + + // Allocating and handling this vec of structs is really shit + // let mut padded_signals_hat = rlst_col_vec![Complex, size_real * ntargets]; + let 
mut padded_signals_hat = vec![Complex::::zero(); size_real*ntargets]; + let mut padded_signals_hat = unsafe {rlst_pointer_mat!['a, Complex, padded_signals_hat.as_mut_ptr(), (size_real*ntargets, 1), (1,1)]}; + + // U::rfft3_fftw_par_vec(&mut padded_signals, &mut padded_signals_hat, &[p, q, r]); + + // // let mut real_parts = Vec::with_capacity(padded_signals_hat.data().len()); + // // let mut imag_parts = Vec::with_capacity(padded_signals_hat.data().len()); + + // // for complex_val in padded_signals_hat.data().iter() { + // // real_parts.push(complex_val.re); + // // imag_parts.push(complex_val.im); + // // } + + // let kernel_data_halo = &self.fmm.m2l.operator_data.kernel_data_rearranged; + // let ntargets = targets.len(); + // let nparents = ntargets / 8; + // let mut global_check_potentials_hat = rlst_col_vec![Complex, size_real * ntargets]; + // let mut global_check_potentials = rlst_col_vec![U, size * ntargets]; + + // Get check potentials in frequency order + // let mut global_check_potentials_hat_freq = vec![Vec::new(); size_real]; + + // unsafe { + // let ptr = global_check_potentials_hat.get_pointer_mut(); + // for (i, elem) in global_check_potentials_hat_freq + // .iter_mut() + // .enumerate() + // .take(size_real) + // { + // for j in 0..ntargets { + // let raw = ptr.offset((j * size_real + i).try_into().unwrap()); + // let send_ptr = SendPtrMut { raw }; + // elem.push(send_ptr); + // } + // } + // } + + // // Get signals into frequency order + // let mut padded_signals_hat_freq = vec![Vec::new(); size_real]; + // let zero = rlst_col_vec![Complex, 8]; + // unsafe { + // let ptr = padded_signals_hat.get_pointer(); + + // for (i, elem) in padded_signals_hat_freq + // .iter_mut() + // .enumerate() + // .take(size_real) + // { + // for j in 0..ntargets { + // let raw = ptr.offset((j * size_real + i).try_into().unwrap()); + // let send_ptr = SendPtr { raw }; + // elem.push(send_ptr); + // } + // // put in a bunch of zeros at the end + // let ptr = zero.get_pointer(); + // for _ in 0..8 { + // let send_ptr = SendPtr { raw: ptr }; + // elem.push(send_ptr) + // } + // } + // } + + // // Create a map between targets and index positions in vec of len 'ntargets' + // let mut target_map = HashMap::new(); + + // for (i, t) in targets.iter().enumerate() { + // target_map.insert(t, i); + // } + // // Find all the displacements used for saving results + // let mut all_displacements = Vec::new(); + // targets.chunks_exact(8).for_each(|sibling_chunk| { + // // not in Morton order (refer to sort method when called on 'neighbours') + // let parent_neighbours: Vec> = + // sibling_chunk[0].parent().all_neighbors(); + + // let displacements = parent_neighbours + // .iter() + // .map(|pn| { + // let mut tmp = Vec::new(); + // if let Some(pn) = pn { + // if self.fmm.tree.keys_set.contains(pn) { + // let mut children = pn.children(); + // children.sort(); + // for child in children { + // // tmp.push(*target_map.get(&child).unwrap() as i64) + // tmp.push(*target_map.get(&child).unwrap()) + // } + // } else { + // for i in 0..8 { + // // tmp.push(-1 as i64) + // tmp.push(ntargets + i) + // } + // } + // } else { + // for i in 0..8 { + // tmp.push(ntargets + i) + // } + // } + + // assert!(tmp.len() == 8); + // tmp + // }) + // .collect_vec(); + // all_displacements.push(displacements); + // }); + + // // let scale = self.m2l_scale(level); + // let scale = Complex::from(self.m2l_scale(level)); + + // let chunk_size = 64; + + // let mut all_save_locations = Vec::new(); + // // nchunks long + // let mut 
all_displacements_chunked = Vec::new(); + + // (0..nparents).step_by(chunk_size).for_each(|chunk_start| { + // let chunk_end = std::cmp::min(chunk_size+chunk_start, nparents); + + // // lookup save locations + // let save_locations = (chunk_start..chunk_end).map(|sibling_idx| { + // sibling_idx*8 + // }).collect_vec(); + // all_save_locations.push(save_locations); + + // // 26 long + // let mut tmp = Vec::new(); + // for i in 0..26 { + // // chunk_size long + // let tmp2 = (chunk_start..chunk_end).map(|sibling_idx| { + // all_displacements[sibling_idx][i][0] + // }).collect_vec(); + // tmp.push(tmp2); + // } + // all_displacements_chunked.push(tmp); + // }); + + // (0..size_real).into_par_iter().for_each(|freq| { + // // Extract frequency component of signal (ntargets long) + // let padded_signal_freq = &padded_signals_hat_freq[freq]; + + // // Extract frequency components of save locations (ntargets long) + // let check_potential_freq = &global_check_potentials_hat_freq[freq]; + + // (0..nparents).step_by(chunk_size).enumerate().for_each(|(c, chunk_start)| { + // let chunk_end = std::cmp::min(chunk_size+chunk_start, nparents); + + // let first = all_save_locations[c].first().unwrap(); + // let last = all_save_locations[c].last().unwrap(); + // let save_locations = &check_potential_freq[*first..*last+8]; + + // for (i, kernel_data) in kernel_data_halo.iter().enumerate().take(26) { + // let frequency_offset = 64 * freq; + // let kernel_data_i = &kernel_data[frequency_offset..(frequency_offset + 64)]; + + // // lookup signals + // let disps = &all_displacements_chunked[c][i]; + // let signals = disps.iter().map(|d| &padded_signal_freq[*d..d+8]).collect_vec(); + // let nsignals = signals.len(); + + // // Loop over all signals and apply Hadamard product for a specific kernel + // for k in 0..nsignals { + // // println!("save_locations {:?} {:?}", save_locations.len(), nsignals); + // let save_locations_raw = &save_locations[k*8..(k+1)*8]; + + // for j in 0..8 { + // let kernel_data_ij = &kernel_data_i[j * 8..(j + 1) * 8]; + // let sig = signals[k][j].raw; + // unsafe { + // save_locations_raw + // .iter() + // .zip(kernel_data_ij.iter()) + // .for_each(|(&sav, &ker)| *sav.raw += scale * ker * *sig) + // } + // } // inner loop + // } + + // } + + // }); + // }); + + + // // Find all the displacements used for saving results + // let mut all_displacements = Vec::new(); + // targets.chunks_exact(8).for_each(|sibling_chunk| { + // // not in Morton order (refer to sort method when called on 'neighbours') + // let parent_neighbours: Vec> = + // sibling_chunk[0].parent().all_neighbors(); + + // let displacements = parent_neighbours + // .iter() + // .map(|pn| { + // let mut tmp = Vec::new(); + // if let Some(pn) = pn { + // if self.fmm.tree.keys_set.contains(pn) { + // let mut children = pn.children(); + // children.sort(); + // for child in children { + // // tmp.push(*target_map.get(&child).unwrap() as i64) + // tmp.push(*target_map.get(&child).unwrap()) + // } + // } else { + // for i in 0..8 { + // tmp.push(ntargets + i) + // } + // } + // } else { + // for i in 0..8 { + // tmp.push(ntargets + i) + // } + // } + + // assert!(tmp.len() == 8); + // tmp + // }) + // .collect_vec(); + // all_displacements.push(displacements); + // }); + + // let scale = Complex::from(self.m2l_scale(level)); + + // (0..size_real).into_par_iter().for_each(|freq| { + // // Extract frequency component of signal (ntargets long) + // let padded_signal_freq = &padded_signals_hat_freq[freq]; + + // // Extract frequency 
components of save locations (ntargets long) + // let check_potential_freq = &global_check_potentials_hat_freq[freq]; + + // (0..nparents).for_each(|sibling_idx| { + // // lookup associated save locations for our current sibling set + // let save_locations = + // &check_potential_freq[(sibling_idx * 8)..(sibling_idx + 1) * 8]; + // let save_locations_raw = save_locations.iter().map(|s| s.raw).collect_vec(); + + // // for each halo position compute convolutions to a given sibling set + // for (i, kernel_data) in kernel_data_halo.iter().enumerate().take(26) { + // let frequency_offset = 64 * freq; + // let kernel_data_i = &kernel_data[frequency_offset..(frequency_offset + 64)]; + + // // Find displacements for signal being translated + // let displacements = &all_displacements[sibling_idx][i]; + + // // Lookup signal to be translated if a translation is to be performed + // let signal = &padded_signal_freq[(displacements[0])..=(displacements[7])]; + // for j in 0..8 { + // let kernel_data_ij = &kernel_data_i[j * 8..(j + 1) * 8]; + // let sig = signal[j].raw; + // unsafe { + // save_locations_raw + // .iter() + // .zip(kernel_data_ij.iter()) + // .for_each(|(&sav, &ker)| *sav += scale * ker * *sig) + // } + // } // inner loop + // } + // }); // over each sibling set + // }); + + // U::irfft_fftw_par_vec( + // &mut global_check_potentials_hat, + // &mut global_check_potentials, + // &[p, q, r], + // ); + + // // Compute local expansion coefficients and save to data tree + // let (_, multi_indices) = MortonKey::surface_grid::(self.fmm.order); + + // let check_potentials = global_check_potentials + // .data() + // .chunks_exact(size) + // .flat_map(|chunk| { + // let m = 2 * self.fmm.order - 1; + // let p = m + 1; + // let mut potentials = Array3D::new((p, p, p)); + // potentials.get_data_mut().copy_from_slice(chunk); + + // let mut tmp = Vec::new(); + // let ntargets = multi_indices.len() / 3; + // let xs = &multi_indices[0..ntargets]; + // let ys = &multi_indices[ntargets..2 * ntargets]; + // let zs = &multi_indices[2 * ntargets..]; + + // for i in 0..ntargets { + // let val = potentials.get(zs[i], ys[i], xs[i]).unwrap(); + // tmp.push(*val); + // } + // tmp + // }) + // .collect_vec(); + + + // // This should be blocked and use blas3 + // let ncoeffs = self.fmm.m2l.ncoeffs(self.fmm.order); + // let check_potentials = unsafe { + // rlst_pointer_mat!['a, U, check_potentials.as_ptr(), (ncoeffs, ntargets), (1, ncoeffs)] + // }; + + // let mut tmp = self + // .fmm + // .dc2e_inv_1 + // .dot(&self.fmm.dc2e_inv_2.dot(&check_potentials)) + // .eval(); + + + // tmp.data_mut() + // .iter_mut() + // .for_each(|d| *d *= self.fmm.kernel.scale(level)); + // let locals = tmp; + + } fn m2l_scale(&self, level: u64) -> U { diff --git a/fmm/src/types.rs b/fmm/src/types.rs index af62acf6..97e91184 100644 --- a/fmm/src/types.rs +++ b/fmm/src/types.rs @@ -6,7 +6,7 @@ use std::{ use bempp_traits::{field::FieldTranslationData, fmm::Fmm, kernel::Kernel, tree::Tree}; use bempp_tree::types::{morton::MortonKey, point::Point}; use cauchy::Scalar; -use num::Float; +use num::{Float, Complex}; use rlst::dense::traits::*; use rlst::dense::{base_matrix::BaseMatrix, data_container::VectorContainer, matrix::Matrix}; use rlst::{self}; @@ -193,6 +193,15 @@ pub struct SendPtrMut { } unsafe impl Sync for SendPtrMut {} +unsafe impl Send for SendPtrMut> {} + + +impl Default for SendPtrMut { + + fn default() -> Self { + SendPtrMut { raw: std::ptr::null_mut() } + } +} /// A threadsafe raw pointer #[derive(Clone, Debug, Copy)] @@ -201,3 
+210,38 @@ pub struct SendPtr<T> {
 }
 
 unsafe impl<T> Sync for SendPtr<T> {}
+
+impl<T> Default for SendPtr<T> {
+    fn default() -> Self {
+        SendPtr { raw: std::ptr::null() }
+    }
+}
+
+/// Iterator over a vector of thread-safe mutable pointers.
+pub struct SendPtrMutIter<'a, T> {
+    vec: &'a Vec<SendPtrMut<T>>,
+    current: usize,
+}
+
+impl<'a, T> SendPtrMutIter<'a, T> {
+    pub fn new(vec: &'a Vec<SendPtrMut<T>>) -> Self {
+        SendPtrMutIter { vec, current: 0 }
+    }
+}
+
+impl<'a, T> Iterator for SendPtrMutIter<'a, T> {
+    type Item = &'a SendPtrMut<T>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.current >= self.vec.len() {
+            None
+        } else {
+            let item = &self.vec[self.current];
+            self.current += 1;
+            Some(item)
+        }
+    }
+}
\ No newline at end of file
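Note on the `SendPtrMut` additions above: a minimal, self-contained sketch of how the wrapper is meant to be used, with rayon tasks writing disjoint blocks of one flat coefficient buffer through raw pointers. The buffer sizes and the helper are illustrative only, and the `Send`/`Sync` impls are repeated locally so the example compiles on its own; in the crate the wrapper lives in fmm/src/types.rs.

use rayon::prelude::*;

// Re-declared locally for the sketch; mirrors crate::types::SendPtrMut.
#[derive(Clone, Copy)]
struct SendPtrMut<T> {
    raw: *mut T,
}

// Safety: each task below writes to a disjoint `ncoeffs`-element block, so the
// shared pointer is never used to create aliasing writes.
unsafe impl<T> Send for SendPtrMut<T> {}
unsafe impl<T> Sync for SendPtrMut<T> {}

fn main() {
    let nboxes = 4;
    let ncoeffs = 8;
    let mut coefficients = vec![0.0_f64; nboxes * ncoeffs];

    let head = SendPtrMut {
        raw: coefficients.as_mut_ptr(),
    };

    // Accumulate into each box's block in parallel, one task per box.
    (0..nboxes).into_par_iter().for_each(|i| {
        let mut ptr = unsafe { head.raw.add(i * ncoeffs) };
        for j in 0..ncoeffs {
            unsafe {
                *ptr += (i * ncoeffs + j) as f64;
                ptr = ptr.add(1);
            }
        }
    });

    // Box 1, coefficient 1 was incremented by 1 * 8 + 1.
    assert_eq!(coefficients[9], 9.0);
}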
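Separately, the commented-out body of `m2l` in fmm_linear.rs is working toward an FFT-accelerated field translation in which the precomputed kernel FFTs and padded multipole signals are regrouped frequency-by-frequency before a Hadamard-style accumulation. As a point of reference only, here is a standalone sketch of that per-frequency accumulation with invented sizes and flat layouts; it omits the 26 halo positions and the displacement lookups, and none of these names exist in the crate.

use num::complex::Complex64;

// For each frequency, an 8x8 block of the kernel FFT acts on the 8 sibling
// signals and accumulates into the 8 sibling check potentials. Keeping the data
// frequency-major makes every operand of the inner loops contiguous in memory.
fn hadamard_accumulate(
    nfreq: usize,
    kernel_hat: &[Complex64],    // len nfreq * 64, frequency-major 8x8 blocks
    signal_hat: &[Complex64],    // len nfreq * 8, frequency-major
    check_hat: &mut [Complex64], // len nfreq * 8, frequency-major
    scale: Complex64,
) {
    let nsib = 8;
    for f in 0..nfreq {
        let k = &kernel_hat[f * nsib * nsib..(f + 1) * nsib * nsib];
        let s = &signal_hat[f * nsib..(f + 1) * nsib];
        let c = &mut check_hat[f * nsib..(f + 1) * nsib];
        for i in 0..nsib {
            let mut acc = Complex64::new(0.0, 0.0);
            for j in 0..nsib {
                acc += k[i * nsib + j] * s[j];
            }
            c[i] += scale * acc;
        }
    }
}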