diff --git a/Cargo.lock b/Cargo.lock index b671f7ebf6..0811e41d08 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1087,8 +1087,7 @@ dependencies = [ [[package]] name = "incrementalmerkletree" version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "361c467824d4d9d4f284be4b2608800839419dccc4d4608f28345237fe354623" +source = "git+https://github.com/nuttycom/incrementalmerkletree?rev=fa147c89c6c98a03bba745538f4e68d4eaed5146#fa147c89c6c98a03bba745538f4e68d4eaed5146" dependencies = [ "either", "proptest", @@ -1476,8 +1475,7 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "orchard" version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb255c3ffdccd3c84fe9ebed72aef64fdc72e6a3e4180dd411002d47abaad42" +source = "git+https://github.com/nuttycom/orchard?rev=7ef1feaf1672980095f424be42fd5f79ba01a5aa#7ef1feaf1672980095f424be42fd5f79ba01a5aa" dependencies = [ "aes", "bitvec", @@ -2246,8 +2244,7 @@ dependencies = [ [[package]] name = "shardtree" version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf20c7a2747d9083092e3a3eeb9a7ed75577ae364896bebbc5e0bdcd4e97735" +source = "git+https://github.com/nuttycom/incrementalmerkletree?rev=fa147c89c6c98a03bba745538f4e68d4eaed5146#fa147c89c6c98a03bba745538f4e68d4eaed5146" dependencies = [ "assert_matches", "bitflags 2.4.1", @@ -3056,6 +3053,7 @@ name = "zcash_client_sqlite" version = "0.9.1" dependencies = [ "assert_matches", + "bls12_381", "bs58", "byteorder", "document-features", diff --git a/Cargo.toml b/Cargo.toml index d90c6d3e04..dc2efce9fa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -120,3 +120,8 @@ zip32 = "0.1" lto = true panic = 'abort' codegen-units = 1 + +[patch.crates-io] +incrementalmerkletree = { git = "https://github.com/nuttycom/incrementalmerkletree", rev = "fa147c89c6c98a03bba745538f4e68d4eaed5146" } +shardtree = { git = "https://github.com/nuttycom/incrementalmerkletree", rev = "fa147c89c6c98a03bba745538f4e68d4eaed5146" } +orchard = { git = "https://github.com/nuttycom/orchard", rev = "7ef1feaf1672980095f424be42fd5f79ba01a5aa" } diff --git a/zcash_client_backend/src/data_api.rs b/zcash_client_backend/src/data_api.rs index 5a72ea9309..3c96d2e14e 100644 --- a/zcash_client_backend/src/data_api.rs +++ b/zcash_client_backend/src/data_api.rs @@ -67,7 +67,10 @@ use incrementalmerkletree::{frontier::Frontier, Retention}; use secrecy::SecretVec; use shardtree::{error::ShardTreeError, store::ShardStore, ShardTree}; -use self::{chain::CommitmentTreeRoot, scanning::ScanRange}; +use self::{ + chain::{ChainState, CommitmentTreeRoot}, + scanning::ScanRange, +}; use crate::{ address::UnifiedAddress, decrypt::DecryptedOutput, @@ -1260,8 +1263,11 @@ pub trait WalletWrite: WalletRead { /// pertaining to this wallet. /// /// `blocks` must be sequential, in order of increasing block height - fn put_blocks(&mut self, blocks: Vec>) - -> Result<(), Self::Error>; + fn put_blocks( + &mut self, + from_state: &ChainState, + blocks: Vec>, + ) -> Result<(), Self::Error>; /// Updates the wallet's view of the blockchain. 
/// @@ -1400,9 +1406,11 @@ pub mod testing { }; use super::{ - chain::CommitmentTreeRoot, scanning::ScanRange, AccountBirthday, BlockMetadata, - DecryptedTransaction, InputSource, NullifierQuery, ScannedBlock, SentTransaction, - WalletCommitmentTrees, WalletRead, WalletSummary, WalletWrite, SAPLING_SHARD_HEIGHT, + chain::{ChainState, CommitmentTreeRoot}, + scanning::ScanRange, + AccountBirthday, BlockMetadata, DecryptedTransaction, InputSource, NullifierQuery, + ScannedBlock, SentTransaction, WalletCommitmentTrees, WalletRead, WalletSummary, + WalletWrite, SAPLING_SHARD_HEIGHT, }; #[cfg(feature = "transparent-inputs")] @@ -1633,6 +1641,7 @@ pub mod testing { #[allow(clippy::type_complexity)] fn put_blocks( &mut self, + _from_state: &ChainState, _blocks: Vec>, ) -> Result<(), Self::Error> { Ok(()) diff --git a/zcash_client_backend/src/data_api/chain.rs b/zcash_client_backend/src/data_api/chain.rs index 5cd911c522..965e54db14 100644 --- a/zcash_client_backend/src/data_api/chain.rs +++ b/zcash_client_backend/src/data_api/chain.rs @@ -145,6 +145,7 @@ use std::ops::Range; +use incrementalmerkletree::frontier::Frontier; use subtle::ConditionallySelectable; use zcash_primitives::consensus::{self, BlockHeight}; @@ -278,6 +279,68 @@ impl ScanSummary { } } +/// The final note commitment tree state for each shielded pool, as of a particular block height. +#[derive(Debug, Clone)] +pub struct ChainState { + block_height: BlockHeight, + final_sapling_tree: Frontier, + #[cfg(feature = "orchard")] + final_orchard_tree: + Frontier, +} + +impl ChainState { + /// Construct a new empty chain state. + pub fn empty(block_height: BlockHeight) -> Self { + Self { + block_height, + final_sapling_tree: Frontier::empty(), + #[cfg(feature = "orchard")] + final_orchard_tree: Frontier::empty(), + } + } + + /// Construct a new [`ChainState`] from its constituent parts. + pub fn new( + block_height: BlockHeight, + final_sapling_tree: Frontier, + #[cfg(feature = "orchard")] final_orchard_tree: Frontier< + orchard::tree::MerkleHashOrchard, + { orchard::NOTE_COMMITMENT_TREE_DEPTH as u8 }, + >, + ) -> Self { + Self { + block_height, + final_sapling_tree, + #[cfg(feature = "orchard")] + final_orchard_tree, + } + } + + /// Returns the block height to which this chain state applies. + pub fn block_height(&self) -> BlockHeight { + self.block_height + } + + /// Returns the frontier of the Sapling note commitment tree as of the end of the block at + /// [`Self::block_height`]. + pub fn final_sapling_tree( + &self, + ) -> &Frontier { + &self.final_sapling_tree + } + + /// Returns the frontier of the Orchard note commitment tree as of the end of the block at + /// [`Self::block_height`]. + #[cfg(feature = "orchard")] + pub fn final_orchard_tree( + &self, + ) -> &Frontier + { + &self.final_orchard_tree + } +} + /// Scans at most `limit` blocks from the provided block source for in order to find transactions /// received by the accounts tracked in the provided wallet database. 
/// @@ -290,7 +353,7 @@ pub fn scan_cached_blocks( params: &ParamsT, block_source: &BlockSourceT, data_db: &mut DbT, - from_height: BlockHeight, + from_state: &ChainState, limit: usize, ) -> Result> where @@ -299,6 +362,7 @@ where DbT: WalletWrite, ::AccountId: ConditionallySelectable + Default + Send + 'static, { + let from_height = from_state.block_height + 1; // Fetch the UnifiedFullViewingKeys we are tracking let account_ufvks = data_db .get_unified_full_viewing_keys() @@ -392,7 +456,9 @@ where }, )?; - data_db.put_blocks(scanned_blocks).map_err(Error::Wallet)?; + data_db + .put_blocks(from_state, scanned_blocks) + .map_err(Error::Wallet)?; Ok(scan_summary) } diff --git a/zcash_client_sqlite/Cargo.toml b/zcash_client_sqlite/Cargo.toml index b106fa5ccb..2c247b393b 100644 --- a/zcash_client_sqlite/Cargo.toml +++ b/zcash_client_sqlite/Cargo.toml @@ -78,10 +78,12 @@ maybe-rayon.workspace = true [dev-dependencies] assert_matches.workspace = true +bls12_381.workspace = true incrementalmerkletree = { workspace = true, features = ["test-dependencies"] } pasta_curves.workspace = true shardtree = { workspace = true, features = ["legacy-api", "test-dependencies"] } nonempty.workspace = true +orchard = { workspace = true, features = ["test-dependencies"] } proptest.workspace = true rand_chacha.workspace = true rand_core.workspace = true diff --git a/zcash_client_sqlite/src/lib.rs b/zcash_client_sqlite/src/lib.rs index 992ac9533d..3517a343de 100644 --- a/zcash_client_sqlite/src/lib.rs +++ b/zcash_client_sqlite/src/lib.rs @@ -32,7 +32,7 @@ // Catch documentation errors caused by code changes. #![deny(rustdoc::broken_intra_doc_links)] -use incrementalmerkletree::Position; +use incrementalmerkletree::{Position, Retention}; use maybe_rayon::{ prelude::{IndexedParallelIterator, ParallelIterator}, slice::ParallelSliceMut, @@ -58,7 +58,7 @@ use zcash_client_backend::{ address::UnifiedAddress, data_api::{ self, - chain::{BlockSource, CommitmentTreeRoot}, + chain::{BlockSource, ChainState, CommitmentTreeRoot}, scanning::{ScanPriority, ScanRange}, AccountBirthday, BlockMetadata, DecryptedTransaction, InputSource, NullifierQuery, ScannedBlock, SentTransaction, WalletCommitmentTrees, WalletRead, WalletSummary, @@ -75,7 +75,12 @@ use zcash_client_backend::{ use crate::{error::SqliteClientError, wallet::commitment_tree::SqliteShardStore}; #[cfg(feature = "orchard")] -use zcash_client_backend::data_api::ORCHARD_SHARD_HEIGHT; +use { + incrementalmerkletree::frontier::Frontier, + shardtree::store::{Checkpoint, ShardStore}, + std::collections::BTreeMap, + zcash_client_backend::data_api::ORCHARD_SHARD_HEIGHT, +}; #[cfg(feature = "transparent-inputs")] use { @@ -92,7 +97,6 @@ use { pub mod chain; pub mod error; - pub mod wallet; use wallet::{ commitment_tree::{self, put_shard_roots}, @@ -535,6 +539,7 @@ impl WalletWrite for WalletDb #[allow(clippy::type_complexity)] fn put_blocks( &mut self, + from_state: &ChainState, blocks: Vec>, ) -> Result<(), Self::Error> { struct BlockPositions { @@ -695,62 +700,168 @@ impl WalletWrite for WalletDb { // Create subtrees from the note commitments in parallel. 
             const CHUNK_SIZE: usize = 1024;
-            {
-                let sapling_subtrees = sapling_commitments
-                    .par_chunks_mut(CHUNK_SIZE)
-                    .enumerate()
-                    .filter_map(|(i, chunk)| {
-                        let start =
-                            start_positions.sapling_start_position + (i * CHUNK_SIZE) as u64;
-                        let end = start + chunk.len() as u64;
-
-                        shardtree::LocatedTree::from_iter(
-                            start..end,
-                            SAPLING_SHARD_HEIGHT.into(),
-                            chunk.iter_mut().map(|n| n.take().expect("always Some")),
-                        )
+            let sapling_subtrees = sapling_commitments
+                .par_chunks_mut(CHUNK_SIZE)
+                .enumerate()
+                .filter_map(|(i, chunk)| {
+                    let start =
+                        start_positions.sapling_start_position + (i * CHUNK_SIZE) as u64;
+                    let end = start + chunk.len() as u64;
+
+                    shardtree::LocatedTree::from_iter(
+                        start..end,
+                        SAPLING_SHARD_HEIGHT.into(),
+                        chunk.iter_mut().map(|n| n.take().expect("always Some")),
+                    )
+                })
+                .map(|res| (res.subtree, res.checkpoints))
+                .collect::<Vec<_>>();
+
+            #[cfg(feature = "orchard")]
+            let orchard_subtrees = orchard_commitments
+                .par_chunks_mut(CHUNK_SIZE)
+                .enumerate()
+                .filter_map(|(i, chunk)| {
+                    let start =
+                        start_positions.orchard_start_position + (i * CHUNK_SIZE) as u64;
+                    let end = start + chunk.len() as u64;
+
+                    shardtree::LocatedTree::from_iter(
+                        start..end,
+                        ORCHARD_SHARD_HEIGHT.into(),
+                        chunk.iter_mut().map(|n| n.take().expect("always Some")),
+                    )
+                })
+                .map(|res| (res.subtree, res.checkpoints))
+                .collect::<Vec<_>>();
+
+            // Collect the complete set of Sapling checkpoints
+            #[cfg(feature = "orchard")]
+            let sapling_checkpoint_positions: BTreeMap<BlockHeight, Position> =
+                sapling_subtrees
+                    .iter()
+                    .flat_map(|(_, checkpoints)| checkpoints.iter())
+                    .map(|(k, v)| (*k, *v))
+                    .collect();
+
+            #[cfg(feature = "orchard")]
+            let orchard_checkpoint_positions: BTreeMap<BlockHeight, Position> =
+                orchard_subtrees
+                    .iter()
+                    .flat_map(|(_, checkpoints)| checkpoints.iter())
+                    .map(|(k, v)| (*k, *v))
+                    .collect();
+
+            #[cfg(feature = "orchard")]
+            fn copy_checkpoints<H, const DEPTH: u8>(
+                // The set of checkpoints to copy from
+                from_checkpoint_positions: &BTreeMap<BlockHeight, Position>,
+                // The set of checkpoints to copy into
+                to_checkpoint_positions: &BTreeMap<BlockHeight, Position>,
+                // The frontier whose position will be used when there is no preceding
+                // checkpoint in to_checkpoint_positions.
+                state_final_tree: &Frontier<H, DEPTH>,
+            ) -> Vec<(BlockHeight, Checkpoint)> {
+                from_checkpoint_positions
+                    .keys()
+                    .flat_map(|from_checkpoint_height| {
+                        to_checkpoint_positions
+                            .range::<BlockHeight, _>(..=*from_checkpoint_height)
+                            .last()
+                            .map_or_else(
+                                || {
+                                    Some((
+                                        *from_checkpoint_height,
+                                        state_final_tree.value().map_or_else(
+                                            || Checkpoint::tree_empty(),
+                                            |t| Checkpoint::at_position(t.position()),
+                                        ),
+                                    ))
+                                },
+                                |(to_prev_height, position)| {
+                                    if *to_prev_height < *from_checkpoint_height {
+                                        Some((
+                                            *from_checkpoint_height,
+                                            Checkpoint::at_position(*position),
+                                        ))
+                                    } else {
+                                        // The checkpoint already exists, so we don't need to
+                                        // do anything.
+                                        None
+                                    }
+                                },
+                            )
+                            .into_iter()
                     })
-                    .map(|res| (res.subtree, res.checkpoints))
-                    .collect::<Vec<_>>();
+                    .collect::<Vec<_>>()
+            }
 
-                // Update the Sapling note commitment tree with all newly read note commitments
-                let mut sapling_subtrees = sapling_subtrees.into_iter();
-                wdb.with_sapling_tree_mut::<_, _, Self::Error>(move |sapling_tree| {
-                    for (tree, checkpoints) in &mut sapling_subtrees {
+            #[cfg(feature = "orchard")]
+            let missing_sapling_checkpoints = copy_checkpoints(
+                &orchard_checkpoint_positions,
+                &sapling_checkpoint_positions,
+                from_state.final_sapling_tree(),
+            );
+            #[cfg(feature = "orchard")]
+            let missing_orchard_checkpoints = copy_checkpoints(
+                &sapling_checkpoint_positions,
+                &orchard_checkpoint_positions,
+                from_state.final_orchard_tree(),
+            );
+
+            // Update the Sapling note commitment tree with all newly read note commitments
+            {
+                let mut sapling_subtrees_iter = sapling_subtrees.into_iter();
+                wdb.with_sapling_tree_mut::<_, _, Self::Error>(|sapling_tree| {
+                    sapling_tree.insert_frontier(
+                        from_state.final_sapling_tree().clone(),
+                        Retention::Checkpoint {
+                            id: from_state.block_height(),
+                            is_marked: false,
+                        },
+                    )?;
+
+                    for (tree, checkpoints) in &mut sapling_subtrees_iter {
                         sapling_tree.insert_tree(tree, checkpoints)?;
                     }
 
+                    // Ensure we have a Sapling checkpoint for each checkpointed Orchard block height
+                    #[cfg(feature = "orchard")]
+                    for (height, checkpoint) in &missing_sapling_checkpoints {
+                        sapling_tree
+                            .store_mut()
+                            .add_checkpoint(*height, checkpoint.clone())
+                            .map_err(ShardTreeError::Storage)?;
+                    }
+
                     Ok(())
                 })?;
             }
 
-            // Create subtrees from the note commitments in parallel.
+            // Update the Orchard note commitment tree with all newly read note commitments
             #[cfg(feature = "orchard")]
             {
-                let orchard_subtrees = orchard_commitments
-                    .par_chunks_mut(CHUNK_SIZE)
-                    .enumerate()
-                    .filter_map(|(i, chunk)| {
-                        let start =
-                            start_positions.orchard_start_position + (i * CHUNK_SIZE) as u64;
-                        let end = start + chunk.len() as u64;
-
-                        shardtree::LocatedTree::from_iter(
-                            start..end,
-                            ORCHARD_SHARD_HEIGHT.into(),
-                            chunk.iter_mut().map(|n| n.take().expect("always Some")),
-                        )
-                    })
-                    .map(|res| (res.subtree, res.checkpoints))
-                    .collect::<Vec<_>>();
-
-                // Update the Orchard note commitment tree with all newly read note commitments
                 let mut orchard_subtrees = orchard_subtrees.into_iter();
-                wdb.with_orchard_tree_mut::<_, _, Self::Error>(move |orchard_tree| {
+                wdb.with_orchard_tree_mut::<_, _, Self::Error>(|orchard_tree| {
+                    orchard_tree.insert_frontier(
+                        from_state.final_orchard_tree().clone(),
+                        Retention::Checkpoint {
+                            id: from_state.block_height(),
+                            is_marked: false,
+                        },
+                    )?;
+
                     for (tree, checkpoints) in &mut orchard_subtrees {
                         orchard_tree.insert_tree(tree, checkpoints)?;
                     }
 
+                    for (height, checkpoint) in &missing_orchard_checkpoints {
+                        orchard_tree
+                            .store_mut()
+                            .add_checkpoint(*height, checkpoint.clone())
+                            .map_err(ShardTreeError::Storage)?;
+                    }
+
                     Ok(())
                 })?;
             }
diff --git a/zcash_client_sqlite/src/testing.rs b/zcash_client_sqlite/src/testing.rs
index 8682e5449b..bd08e61795 100644
--- a/zcash_client_sqlite/src/testing.rs
+++ b/zcash_client_sqlite/src/testing.rs
@@ -1,10 +1,11 @@
-use std::convert::Infallible;
 use std::fmt;
 use std::num::NonZeroU32;
+use std::{collections::BTreeMap, convert::Infallible};
 
 #[cfg(feature = "unstable")]
 use std::fs::File;
 
+use group::ff::Field;
 use nonempty::NonEmpty;
 use prost::Message;
 use rand_chacha::ChaChaRng;
@@ -45,6 +46,7 @@ use zcash_client_backend::{
     zip321,
 };
 use zcash_client_backend::{
+
data_api::chain::ChainState, fees::{standard, DustOutputPolicy}, ShieldedProtocol, }; @@ -76,8 +78,9 @@ use super::BlockDb; #[cfg(feature = "orchard")] use { - group::ff::{Field, PrimeField}, + group::ff::PrimeField, orchard::note_encryption::{OrchardDomain, OrchardNoteEncryption}, + orchard::tree::MerkleHashOrchard, pasta_curves::pallas, zcash_client_backend::proto::compact_formats::CompactOrchardAction, }; @@ -177,7 +180,8 @@ impl TestBuilder { TestState { cache: self.cache, - latest_cached_block: None, + cached_blocks: BTreeMap::new(), + latest_block_height: None, _data_file: data_file, db_data, test_account, @@ -186,9 +190,10 @@ impl TestBuilder { } } +#[derive(Clone, Debug)] pub(crate) struct CachedBlock { - height: BlockHeight, hash: BlockHash, + chain_state: ChainState, sapling_end_size: u32, orchard_end_size: u32, } @@ -196,44 +201,87 @@ pub(crate) struct CachedBlock { impl CachedBlock { fn none(sapling_activation_height: BlockHeight) -> Self { Self { - height: sapling_activation_height, hash: BlockHash([0; 32]), + chain_state: ChainState::empty(sapling_activation_height), sapling_end_size: 0, orchard_end_size: 0, } } fn at( - height: BlockHeight, hash: BlockHash, - sapling_tree_size: u32, - orchard_tree_size: u32, + chain_state: ChainState, + sapling_end_size: u32, + orchard_end_size: u32, ) -> Self { + assert_eq!( + chain_state.final_sapling_tree().tree_size() as u32, + sapling_end_size + ); + #[cfg(feature = "orchard")] + assert_eq!( + chain_state.final_orchard_tree().tree_size() as u32, + orchard_end_size + ); + Self { - height, hash, - sapling_end_size: sapling_tree_size, - orchard_end_size: orchard_tree_size, + chain_state, + sapling_end_size, + orchard_end_size, } } - fn roll_forward(self, cb: &CompactBlock) -> Self { - assert_eq!(self.height + 1, cb.height()); + fn roll_forward(&self, cb: &CompactBlock) -> Self { + assert_eq!(self.chain_state.block_height() + 1, cb.height()); + + let sapling_final_tree = cb.vtx.iter().flat_map(|tx| tx.outputs.iter()).fold( + self.chain_state.final_sapling_tree().clone(), + |mut acc, c_out| { + acc.append(sapling::Node::from_cmu(&c_out.cmu().unwrap())); + acc + }, + ); + let sapling_end_size = sapling_final_tree.tree_size() as u32; + + #[cfg(feature = "orchard")] + let orchard_final_tree = cb.vtx.iter().flat_map(|tx| tx.actions.iter()).fold( + self.chain_state.final_orchard_tree().clone(), + |mut acc, c_act| { + acc.append(MerkleHashOrchard::from_cmx(&c_act.cmx().unwrap())); + acc + }, + ); + #[cfg(feature = "orchard")] + let orchard_end_size = orchard_final_tree.tree_size() as u32; + #[cfg(not(feature = "orchard"))] + let orchard_end_size = cb.vtx.iter().fold(self.orchard_end_size, |sz, tx| { + sz + (tx.actions.len() as u32) + }); + Self { - height: cb.height(), hash: cb.hash(), - sapling_end_size: self.sapling_end_size - + cb.vtx.iter().map(|tx| tx.outputs.len() as u32).sum::(), - orchard_end_size: self.orchard_end_size - + cb.vtx.iter().map(|tx| tx.actions.len() as u32).sum::(), + chain_state: ChainState::new( + cb.height(), + sapling_final_tree, + #[cfg(feature = "orchard")] + orchard_final_tree, + ), + sapling_end_size, + orchard_end_size, } } + + fn height(&self) -> BlockHeight { + self.chain_state.block_height() + } } /// The state for a `zcash_client_sqlite` test. 
pub(crate) struct TestState { cache: Cache, - latest_cached_block: Option, + cached_blocks: BTreeMap, + latest_block_height: Option, _data_file: NamedTempFile, db_data: WalletDb, test_account: Option<( @@ -256,7 +304,25 @@ where } pub(crate) fn latest_cached_block(&self) -> Option<&CachedBlock> { - self.latest_cached_block.as_ref() + self.latest_block_height + .as_ref() + .and_then(|h| self.prior_cached_block(*h + 1)) + } + + fn prior_cached_block(&self, height: BlockHeight) -> Option<&CachedBlock> { + self.cached_blocks.range(..height).last().map(|(_, b)| b) + } + + fn cache_block( + &mut self, + prior_cached_block: &CachedBlock, + compact_block: CompactBlock, + ) -> Cache::InsertResult { + self.cached_blocks.insert( + compact_block.height(), + prior_cached_block.roll_forward(&compact_block), + ); + self.cache.insert(&compact_block) } /// Creates a fake block at the expected next height containing a single output of the @@ -267,22 +333,19 @@ where req: AddressType, value: NonNegativeAmount, ) -> (BlockHeight, Cache::InsertResult, Fvk::Nullifier) { - let cached_block = self - .latest_cached_block - .take() - .unwrap_or_else(|| CachedBlock::none(self.sapling_activation_height() - 1)); - let height = cached_block.height + 1; + let pre_activation_block = CachedBlock::none(self.sapling_activation_height() - 1); + let prior_cached_block = self.latest_cached_block().unwrap_or(&pre_activation_block); + let height = prior_cached_block.height() + 1; let (res, nf) = self.generate_block_at( height, - cached_block.hash, + prior_cached_block.hash, fvk, req, value, - cached_block.sapling_end_size, - cached_block.orchard_end_size, + prior_cached_block.sapling_end_size, + prior_cached_block.orchard_end_size, ); - assert!(self.latest_cached_block.is_some()); (height, res, nf) } @@ -303,6 +366,59 @@ where initial_sapling_tree_size: u32, initial_orchard_tree_size: u32, ) -> (Cache::InsertResult, Fvk::Nullifier) { + let mut prior_cached_block = self + .prior_cached_block(height) + .cloned() + .unwrap_or_else(|| CachedBlock::none(self.sapling_activation_height() - 1)); + assert!(prior_cached_block.chain_state.block_height() < height); + assert!(prior_cached_block.sapling_end_size <= initial_sapling_tree_size); + assert!(prior_cached_block.orchard_end_size <= initial_orchard_tree_size); + + // If the block height has increased or the Sapling and/or Orchard tree sizes have changed, + // we need to generate a new prior cached block that the block to be generated can + // successfully chain from, with the provided tree sizes. 
+        if prior_cached_block.chain_state.block_height() == height - 1 {
+            assert_eq!(prev_hash, prior_cached_block.hash);
+        } else {
+            // There is a gap in the cache; pad the frontiers with random note commitments.
+
+            let final_sapling_tree =
+                (prior_cached_block.sapling_end_size..initial_sapling_tree_size).fold(
+                    prior_cached_block.chain_state.final_sapling_tree().clone(),
+                    |mut acc, _| {
+                        acc.append(sapling::Node::from_scalar(bls12_381::Scalar::random(
+                            &mut self.rng,
+                        )));
+                        acc
+                    },
+                );
+
+            #[cfg(feature = "orchard")]
+            let final_orchard_tree =
+                (prior_cached_block.orchard_end_size..initial_orchard_tree_size).fold(
+                    prior_cached_block.chain_state.final_orchard_tree().clone(),
+                    |mut acc, _| {
+                        acc.append(MerkleHashOrchard::random(&mut self.rng));
+                        acc
+                    },
+                );
+
+            prior_cached_block = CachedBlock::at(
+                prev_hash,
+                ChainState::new(
+                    height - 1,
+                    final_sapling_tree,
+                    #[cfg(feature = "orchard")]
+                    final_orchard_tree,
+                ),
+                initial_sapling_tree_size,
+                initial_orchard_tree_size,
+            );
+
+            self.cached_blocks
+                .insert(height - 1, prior_cached_block.clone());
+        }
+
         let (cb, nf) = fake_compact_block(
             &self.network(),
             height,
@@ -314,17 +430,10 @@ where
             initial_orchard_tree_size,
             &mut self.rng,
         );
-        let res = self.cache.insert(&cb);
+        assert_eq!(cb.height(), height);
 
-        self.latest_cached_block = Some(
-            CachedBlock::at(
-                height - 1,
-                cb.hash(),
-                initial_sapling_tree_size,
-                initial_orchard_tree_size,
-            )
-            .roll_forward(&cb),
-        );
+        let res = self.cache_block(&prior_cached_block, cb);
+        self.latest_block_height = Some(height);
 
         (res, nf)
     }
 
@@ -338,27 +447,28 @@ where
         to: impl Into<Address>
, value: NonNegativeAmount, ) -> (BlockHeight, Cache::InsertResult) { - let cached_block = self - .latest_cached_block - .take() + let prior_cached_block = self + .latest_cached_block() + .cloned() .unwrap_or_else(|| CachedBlock::none(self.sapling_activation_height() - 1)); - let height = cached_block.height + 1; + let height = prior_cached_block.height() + 1; let cb = fake_compact_block_spending( &self.network(), height, - cached_block.hash, + prior_cached_block.hash, note, fvk, to.into(), value, - cached_block.sapling_end_size, - cached_block.orchard_end_size, + prior_cached_block.sapling_end_size, + prior_cached_block.orchard_end_size, &mut self.rng, ); - let res = self.cache.insert(&cb); + assert_eq!(cb.height(), height); - self.latest_cached_block = Some(cached_block.roll_forward(&cb)); + let res = self.cache_block(&prior_cached_block, cb); + self.latest_block_height = Some(height); (height, res) } @@ -393,24 +503,25 @@ where tx_index: usize, tx: &Transaction, ) -> (BlockHeight, Cache::InsertResult) { - let cached_block = self - .latest_cached_block - .take() + let prior_cached_block = self + .latest_cached_block() + .cloned() .unwrap_or_else(|| CachedBlock::none(self.sapling_activation_height() - 1)); - let height = cached_block.height + 1; + let height = prior_cached_block.height() + 1; let cb = fake_compact_block_from_tx( height, - cached_block.hash, + prior_cached_block.hash, tx_index, tx, - cached_block.sapling_end_size, - cached_block.orchard_end_size, + prior_cached_block.sapling_end_size, + prior_cached_block.orchard_end_size, &mut self.rng, ); - let res = self.cache.insert(&cb); + assert_eq!(cb.height(), height); - self.latest_cached_block = Some(cached_block.roll_forward(&cb)); + let res = self.cache_block(&prior_cached_block, cb); + self.latest_block_height = Some(height); (height, res) } @@ -438,13 +549,19 @@ where ::Error, >, > { - scan_cached_blocks( + let prior_cached_block = self + .prior_cached_block(from_height) + .cloned() + .unwrap_or_else(|| CachedBlock::none(from_height - 1)); + + let result = scan_cached_blocks( &self.network(), self.cache.block_source(), &mut self.db_data, - from_height, + &prior_cached_block.chain_state, limit, - ) + ); + result } /// Resets the wallet using a new wallet database but with the same cache of blocks, @@ -455,7 +572,7 @@ where /// Before using any `generate_*` method on the reset state, call `reset_latest_cached_block()`. pub(crate) fn reset(&mut self) -> NamedTempFile { let network = self.network(); - self.latest_cached_block = None; + self.latest_block_height = None; let tf = std::mem::replace(&mut self._data_file, NamedTempFile::new().unwrap()); self.db_data = WalletDb::for_path(self._data_file.path(), network).unwrap(); self.test_account = None; @@ -463,23 +580,23 @@ where tf } - /// Reset the latest cached block to the most recent one in the cache database. - #[allow(dead_code)] - pub(crate) fn reset_latest_cached_block(&mut self) { - self.cache - .block_source() - .with_blocks::<_, Infallible>(None, None, |block: CompactBlock| { - let chain_metadata = block.chain_metadata.unwrap(); - self.latest_cached_block = Some(CachedBlock::at( - BlockHeight::from_u32(block.height.try_into().unwrap()), - BlockHash::from_slice(block.hash.as_slice()), - chain_metadata.sapling_commitment_tree_size, - chain_metadata.orchard_commitment_tree_size, - )); - Ok(()) - }) - .unwrap(); - } + // /// Reset the latest cached block to the most recent one in the cache database. 
+ // #[allow(dead_code)] + // pub(crate) fn reset_latest_cached_block(&mut self) { + // self.cache + // .block_source() + // .with_blocks::<_, Infallible>(None, None, |block: CompactBlock| { + // let chain_metadata = block.chain_metadata.unwrap(); + // self.latest_cached_block = Some(CachedBlock::at( + // BlockHash::from_slice(block.hash.as_slice()), + // BlockHeight::from_u32(block.height.try_into().unwrap()), + // chain_metadata.sapling_commitment_tree_size, + // chain_metadata.orchard_commitment_tree_size, + // )); + // Ok(()) + // }) + // .unwrap(); + // } } impl TestState { diff --git a/zcash_client_sqlite/src/testing/pool.rs b/zcash_client_sqlite/src/testing/pool.rs index 9cdb440b44..4beffc3d73 100644 --- a/zcash_client_sqlite/src/testing/pool.rs +++ b/zcash_client_sqlite/src/testing/pool.rs @@ -1453,7 +1453,7 @@ pub(crate) fn cross_pool_exchange