diff --git a/grovedb/src/replication.rs b/grovedb/src/replication.rs
index c338d40e..e4f3547e 100644
--- a/grovedb/src/replication.rs
+++ b/grovedb/src/replication.rs
@@ -246,7 +246,7 @@ impl GroveDb {
                         "Unable to create to load chunk".to_string(),
                     )),
                 }
-            },
+            }
             Err(_) => Err(Error::CorruptedData(
                 "Unable to create Chunk producer".to_string(),
             )),
@@ -271,7 +271,7 @@ impl GroveDb {
                         "Unable to create to load chunk".to_string(),
                     )),
                 }
-            },
+            }
             Err(_) => Err(Error::CorruptedData(
                 "Unable to create Chunk producer".to_string(),
             )),
diff --git a/merk/src/merk/chunks.rs b/merk/src/merk/chunks.rs
index de0b5fe0..f6b1b64c 100644
--- a/merk/src/merk/chunks.rs
+++ b/merk/src/merk/chunks.rs
@@ -39,9 +39,9 @@ use crate::{
             error::ChunkError,
             util::{
                 chunk_height, chunk_index_from_traversal_instruction,
-                chunk_index_from_traversal_instruction_with_recovery, generate_traversal_instruction,
-                generate_traversal_instruction_as_vec_bytes, vec_bytes_as_traversal_instruction,
-                number_of_chunks,
+                chunk_index_from_traversal_instruction_with_recovery,
+                generate_traversal_instruction, generate_traversal_instruction_as_vec_bytes,
+                number_of_chunks, vec_bytes_as_traversal_instruction,
             },
         },
         Node, Op,
@@ -383,7 +383,9 @@ where
             self.chunk_with_index(self.index)
                 .and_then(|(chunk, chunk_index)| {
                     chunk_index
-                        .map(|index| generate_traversal_instruction_as_vec_bytes(self.height, index))
+                        .map(|index| {
+                            generate_traversal_instruction_as_vec_bytes(self.height, index)
+                        })
                         .transpose()
                         .map(|v| (chunk, v))
                 }),
diff --git a/merk/src/merk/restore.rs b/merk/src/merk/restore.rs
index fd61c2f0..9e26b1af 100644
--- a/merk/src/merk/restore.rs
+++ b/merk/src/merk/restore.rs
@@ -41,7 +41,7 @@ use crate::{
         chunk::{LEFT, RIGHT},
         chunk_op::ChunkOp,
         error::{ChunkError, ChunkError::InternalError},
-        util::{vec_bytes_as_traversal_instruction, traversal_instruction_as_vec_bytes},
+        util::{traversal_instruction_as_vec_bytes, vec_bytes_as_traversal_instruction},
     },
     tree::{execute, Child, Tree as ProofTree},
     Node, Op,
@@ -122,7 +122,10 @@ impl<'db, S: StorageContext<'db>> Restorer {
 
     /// Process multi chunks (space optimized chunk proofs that can contain
     /// multiple singular chunks)
-    pub fn process_multi_chunk(&mut self, multi_chunk: Vec<ChunkOp>) -> Result<Vec<Vec<u8>>, Error> {
+    pub fn process_multi_chunk(
+        &mut self,
+        multi_chunk: Vec<ChunkOp>,
+    ) -> Result<Vec<Vec<u8>>, Error> {
         let mut expect_chunk_id = true;
         let mut chunk_ids = vec![];
         let mut current_chunk_id = vec![];
@@ -241,7 +244,8 @@ impl<'db, S: StorageContext<'db>> Restorer {
                     Node::Hash(hash) => {
                         // the node hash points to the root of another chunk
                         // we get the chunk id and add the hash to restorer state
-                        let chunk_id = traversal_instruction_as_vec_bytes(node_traversal_instruction);
+                        let chunk_id =
+                            traversal_instruction_as_vec_bytes(node_traversal_instruction);
                         new_chunk_ids.push(chunk_id.to_vec());
                         self.chunk_id_to_root_hash.insert(chunk_id.to_vec(), *hash);
                         // TODO: handle unwrap
@@ -670,7 +674,10 @@ mod tests {
         let (chunk, _) = chunk_producer.chunk_with_index(1).unwrap();
         // apply first chunk
         let new_chunk_ids = restorer
-            .process_chunk(&traversal_instruction_as_vec_bytes(vec![].as_slice()), chunk)
+            .process_chunk(
+                &traversal_instruction_as_vec_bytes(vec![].as_slice()),
+                chunk,
+            )
             .expect("should process chunk successfully");
         assert_eq!(new_chunk_ids.len(), 4);
 
@@ -679,22 +686,22 @@ mod tests {
         // chunk_map should have 4 elements
         assert_eq!(restorer.chunk_id_to_root_hash.len(), 4);
         // assert all the chunk hash values
         assert_eq!(
-            restorer.chunk_id_to_root_hash.get(vec![1,1].as_slice()),
+            restorer.chunk_id_to_root_hash.get(vec![1, 1].as_slice()),
             Some(get_node_hash(traverse_get_node_hash(&mut tree_walker, &[LEFT, LEFT])).unwrap())
                 .as_ref()
         );
         assert_eq!(
-            restorer.chunk_id_to_root_hash.get(vec![1,0].as_slice()),
+            restorer.chunk_id_to_root_hash.get(vec![1, 0].as_slice()),
             Some(get_node_hash(traverse_get_node_hash(&mut tree_walker, &[LEFT, RIGHT])).unwrap())
                 .as_ref()
         );
         assert_eq!(
-            restorer.chunk_id_to_root_hash.get(vec![0,1].as_slice()),
+            restorer.chunk_id_to_root_hash.get(vec![0, 1].as_slice()),
             Some(get_node_hash(traverse_get_node_hash(&mut tree_walker, &[RIGHT, LEFT])).unwrap())
                 .as_ref()
         );
         assert_eq!(
-            restorer.chunk_id_to_root_hash.get(vec![0,0].as_slice()),
+            restorer.chunk_id_to_root_hash.get(vec![0, 0].as_slice()),
             Some(get_node_hash(traverse_get_node_hash(&mut tree_walker, &[RIGHT, RIGHT])).unwrap())
                 .as_ref()
         );
@@ -703,18 +710,26 @@ mod tests {
         let (chunk, _) = chunk_producer.chunk_with_index(2).unwrap();
         // apply second chunk
         let new_chunk_ids = restorer
-            .process_chunk(&traversal_instruction_as_vec_bytes(&vec![LEFT, LEFT]), chunk)
+            .process_chunk(
+                &traversal_instruction_as_vec_bytes(&vec![LEFT, LEFT]),
+                chunk,
+            )
             .unwrap();
         assert_eq!(new_chunk_ids.len(), 0);
         // chunk_map should have 1 less element
         assert_eq!(restorer.chunk_id_to_root_hash.len(), 3);
-        assert_eq!(restorer.chunk_id_to_root_hash.get(vec![1,1].as_slice()), None);
+        assert_eq!(
+            restorer.chunk_id_to_root_hash.get(vec![1, 1].as_slice()),
+            None
+        );
 
         // let's try to apply the second chunk again, should not work
         let (chunk, _) = chunk_producer.chunk_with_index(2).unwrap();
         // apply second chunk
-        let chunk_process_result =
-            restorer.process_chunk(&traversal_instruction_as_vec_bytes(&vec![LEFT, LEFT]), chunk);
+        let chunk_process_result = restorer.process_chunk(
+            &traversal_instruction_as_vec_bytes(&vec![LEFT, LEFT]),
+            chunk,
+        );
         assert!(chunk_process_result.is_err());
         assert!(matches!(
             chunk_process_result,
@@ -724,8 +739,10 @@ mod tests {
         // next let's get a random but expected chunk and work with that e.g. chunk 4
         // but let's apply it to the wrong place
         let (chunk, _) = chunk_producer.chunk_with_index(4).unwrap();
-        let chunk_process_result =
-            restorer.process_chunk(&traversal_instruction_as_vec_bytes(&vec![LEFT, RIGHT]), chunk);
+        let chunk_process_result = restorer.process_chunk(
+            &traversal_instruction_as_vec_bytes(&vec![LEFT, RIGHT]),
+            chunk,
+        );
         assert!(chunk_process_result.is_err());
         assert!(matches!(
             chunk_process_result,
@@ -738,34 +755,52 @@ mod tests {
         let (chunk, _) = chunk_producer.chunk_with_index(5).unwrap();
         // apply second chunk
         let new_chunk_ids = restorer
-            .process_chunk(&traversal_instruction_as_vec_bytes(&vec![RIGHT, RIGHT]), chunk)
+            .process_chunk(
+                &traversal_instruction_as_vec_bytes(&vec![RIGHT, RIGHT]),
+                chunk,
+            )
             .unwrap();
         assert_eq!(new_chunk_ids.len(), 0);
         // chunk_map should have 1 less element
         assert_eq!(restorer.chunk_id_to_root_hash.len(), 2);
-        assert_eq!(restorer.chunk_id_to_root_hash.get(vec![0,0].as_slice()), None);
+        assert_eq!(
+            restorer.chunk_id_to_root_hash.get(vec![0, 0].as_slice()),
+            None
+        );
 
         // correctly apply chunk 3
         let (chunk, _) = chunk_producer.chunk_with_index(3).unwrap();
         // apply second chunk
         let new_chunk_ids = restorer
-            .process_chunk(&traversal_instruction_as_vec_bytes(&vec![LEFT, RIGHT]), chunk)
+            .process_chunk(
+                &traversal_instruction_as_vec_bytes(&vec![LEFT, RIGHT]),
+                chunk,
+            )
             .unwrap();
         assert_eq!(new_chunk_ids.len(), 0);
         // chunk_map should have 1 less element
         assert_eq!(restorer.chunk_id_to_root_hash.len(), 1);
-        assert_eq!(restorer.chunk_id_to_root_hash.get(vec![1,0].as_slice()), None);
+        assert_eq!(
+            restorer.chunk_id_to_root_hash.get(vec![1, 0].as_slice()),
+            None
+        );
 
         // correctly apply chunk 4
        let (chunk, _) = chunk_producer.chunk_with_index(4).unwrap();
         // apply second chunk
         let new_chunk_ids = restorer
-            .process_chunk(&traversal_instruction_as_vec_bytes(&vec![RIGHT, LEFT]), chunk)
+            .process_chunk(
+                &traversal_instruction_as_vec_bytes(&vec![RIGHT, LEFT]),
+                chunk,
+            )
             .unwrap();
         assert_eq!(new_chunk_ids.len(), 0);
         // chunk_map should have 1 less element
         assert_eq!(restorer.chunk_id_to_root_hash.len(), 0);
-        assert_eq!(restorer.chunk_id_to_root_hash.get(vec![0,1].as_slice()), None);
+        assert_eq!(
+            restorer.chunk_id_to_root_hash.get(vec![0, 1].as_slice()),
+            None
+        );
 
         // finalize merk
         let restored_merk = restorer.finalize().expect("should finalized successfully");
@@ -862,9 +897,7 @@ mod tests {
         // perform chunk production and processing
         let mut chunk_id_opt = Some(vec![]);
         while let Some(chunk_id) = chunk_id_opt {
-            let (chunk, next_chunk_id) = chunk_producer
-                .chunk(&chunk_id)
-                .expect("should get chunk");
+            let (chunk, next_chunk_id) = chunk_producer.chunk(&chunk_id).expect("should get chunk");
             restorer
                 .process_chunk(&chunk_id, chunk)
                 .expect("should process chunk successfully");
@@ -1072,7 +1105,7 @@ mod tests {
         // should only contain the first chunk
         assert_eq!(multi_chunk.chunk.len(), 2);
         // should point to chunk 2
-        assert_eq!(multi_chunk.next_index, Some(vec![1,1]));
+        assert_eq!(multi_chunk.next_index, Some(vec![1, 1]));
         let next_ids = restorer.process_multi_chunk(multi_chunk.chunk).unwrap();
         assert_eq!(next_ids.len(), 4);
         assert_eq!(restorer.chunk_id_to_root_hash.len(), 4);
@@ -1085,7 +1118,7 @@ mod tests {
             .multi_chunk_with_limit(multi_chunk.next_index.unwrap().as_slice(), Some(645))
             .unwrap();
         assert_eq!(multi_chunk.chunk.len(), 4);
-        assert_eq!(multi_chunk.next_index, Some(vec![0u8,1u8]));
+        assert_eq!(multi_chunk.next_index, Some(vec![0u8, 1u8]));
         let next_ids = restorer.process_multi_chunk(multi_chunk.chunk).unwrap();
         // chunks 2 and 3 are leaf chunks
         assert_eq!(next_ids.len(), 0);
diff --git a/merk/src/proofs/chunk/util.rs b/merk/src/proofs/chunk/util.rs
index 4394cfca..39c513b7 100644
--- a/merk/src/proofs/chunk/util.rs
+++ b/merk/src/proofs/chunk/util.rs
@@ -171,7 +171,10 @@ fn exit_node_count(height: usize) -> usize {
 }
 
 /// Generate instruction for traversing to a given chunk index in a binary tree
-pub fn generate_traversal_instruction(height: usize, chunk_index: usize) -> Result<Vec<bool>, Error> {
+pub fn generate_traversal_instruction(
+    height: usize,
+    chunk_index: usize,
+) -> Result<Vec<bool>, Error> {
     let mut instructions = vec![];
 
     let total_chunk_count = number_of_chunks(height);
@@ -226,8 +229,8 @@ pub fn generate_traversal_instruction(height: usize, chunk_index: usize) -> Resu
     Ok(instructions)
 }
 
-/// Determine the chunk index given the traversal instruction and the max height of
-/// the tree
+/// Determine the chunk index given the traversal instruction and the max height
+/// of the tree
 pub fn chunk_index_from_traversal_instruction(
     traversal_instruction: &[bool],
     height: usize,
@@ -309,8 +312,8 @@ pub fn chunk_index_from_traversal_instruction(
     Ok(current_chunk_index)
 }
 
-/// Determine the chunk index given the traversal instruction and the max height of
-/// the tree. This can recover from traversal instructions not pointing to a
+/// Determine the chunk index given the traversal instruction and the max height
+/// of the tree. This can recover from traversal instructions not pointing to a
 /// chunk boundary, in such a case, it backtracks until it hits a chunk
 /// boundary.
 pub fn chunk_index_from_traversal_instruction_with_recovery(
@@ -349,7 +352,9 @@ pub fn traversal_instruction_as_vec_bytes(instruction: &[bool]) -> Vec {
 
 /// Converts a vec bytes that represents a traversal instruction
 /// to a vec of bool, true = left and false = right
-pub fn vec_bytes_as_traversal_instruction(instruction_vec_bytes: &[u8]) -> Result<Vec<bool>, Error> {
+pub fn vec_bytes_as_traversal_instruction(
+    instruction_vec_bytes: &[u8],
+) -> Result<Vec<bool>, Error> {
     instruction_vec_bytes
         .iter()
         .map(|byte| match byte {
@@ -579,8 +584,14 @@ mod test {
 
     #[test]
     fn test_instruction_string_to_traversal_instruction() {
-        assert_eq!(vec_bytes_as_traversal_instruction(&vec![1u8]).unwrap(), vec![LEFT]);
-        assert_eq!(vec_bytes_as_traversal_instruction(&vec![0u8]).unwrap(), vec![RIGHT]);
+        assert_eq!(
+            vec_bytes_as_traversal_instruction(&vec![1u8]).unwrap(),
+            vec![LEFT]
+        );
+        assert_eq!(
+            vec_bytes_as_traversal_instruction(&vec![0u8]).unwrap(),
+            vec![RIGHT]
+        );
         assert_eq!(
             vec_bytes_as_traversal_instruction(&vec![0u8, 0u8, 1u8]).unwrap(),
             vec![RIGHT, RIGHT, LEFT]