From 0057a112158cd015bfb1adec4e54c82a0cafdbfe Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 12 May 2023 23:23:16 -0500 Subject: [PATCH 01/39] rough draft of potential parent search --- client/consensus/common/src/lib.rs | 159 ++++++++++++++++++++++++ client/relay-chain-interface/src/lib.rs | 7 ++ 2 files changed, 166 insertions(+) diff --git a/client/consensus/common/src/lib.rs b/client/consensus/common/src/lib.rs index b74829e191f..c8394dafb96 100644 --- a/client/consensus/common/src/lib.rs +++ b/client/consensus/common/src/lib.rs @@ -16,6 +16,8 @@ use polkadot_primitives::{Hash as PHash, PersistedValidationData}; +use cumulus_primitives_core::ParaId; + use sc_client_api::Backend; use sc_consensus::{shared_data::SharedData, BlockImport, ImportResult}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; @@ -174,3 +176,160 @@ where pub trait ParachainBlockImportMarker {} impl ParachainBlockImportMarker for ParachainBlockImport {} + +/// Parameters when searching for suitable parents to build on top of. +pub struct ParentSearchParams { + /// The relay-parent that is intended to be used. + pub relay_parent: PHash, + /// The ID of the parachain. + pub para_id: ParaId, + /// A limitation on the age of relay parents for parachain blocks that are being + /// considered. This is relative to the `relay_parent` number. + pub ancestry_lookback: usize, + /// How "deep" parents can be relative to the included parachain block at the relay-parent. + /// The included block has depth 0. + pub max_depth: usize, + /// Whether to only ignore "alternative" branches, i.e. branches of the chain + /// which do not contain the block pending availability. + pub ignore_alternative_branches: bool, +} + +/// A potential parent block returned from [`find_potential_parents`] +pub struct PotentialParent { + /// The hash of the block. + pub hash: B::Hash, + /// The header of the block. + pub header: B::Header, + /// The depth of the block. 
+ pub depth: usize, + /// Whether the block descends from the block pending availability. + /// + /// This is false for the last inclued block as well as the block pending availability itself. + pub descends_from_pending: bool, +} + +/// Perform a recursive search through blocks to find potential +/// parent blocks for a new block. +/// +/// This accepts a relay-chain block to be used as an anchor and a maximum search depth, +/// along with some arguments for filtering parachain blocks and performs a recursive search +/// for parachain blocks. The search begins at the last included parachain block and returns +/// a set of [`PotentialParent`]s which could be potential parents of a new block with this +/// relay-parent according to the search parameters. +/// +/// A parachain block is a potential parent if it is either the last included parachain block, the pending +/// parachain block (when `max_depth` >= 1), or all of the following hold: +/// * its parent is a potential parent +/// * its relay-parent is within `ancestry_lookback` of the targeted relay-parent. +/// * the block number is within `max_depth` blocks of the included block +pub async fn find_potential_parents( + params: ParentSearchParams, + client: &C, + relay_client: &impl RelayChainInterface, +) -> Result, RelayChainError> { + // 1. Build up the ancestry record of the relay chain to compare against. + let rp_ancestry = { + let mut ancestry = Vec::with_capacity(params.ancestry_lookback + 1); + let mut current_rp = params.relay_parent; + while ancestry.len() <= params.ancestry_lookback { + let header = match relay_client.header(current_rp).await? { + None => break, + Some(h) => h, + }; + + ancestry.push((current_rp, header.state_root().clone())); + current_rp = header.parent_hash().clone(); + + // don't iterate back into the genesis block. 
+ if header.number == 1u32.into() { break } + } + + rp_ancestry + }; + + let is_hash_in_ancestry = |hash| rp_ancestry.iter().any(|x| x.0 == hash); + let is_root_in_ancestry = |root| rp.ancestry.iter().any(|x| x.1 == root); + + // 2. Get the included and pending availability blocks. + let included_header = relay_client.persisted_validation_data( + params.relay_parent, + params.para_id, + OccupiedCoreAssumption::TimedOut, + )?; + + let included_header = match included_header { + Some(pvd) => pvd.parent_head, + None => return Ok(Vec::new()), // this implies the para doesn't exist. + }; + + let pending_header = relay_client.persisted_validation_data( + params.relay_parent, + params.para_id, + OccupiedCoreAssumption::Included, + )?.and_then(|x| if x.parent_head != included_header { Some(x.parent_head) } else { None }); + + let included_header = match B::Header::decode(&mut &included_header.0[..]).ok() { + None => return Ok(Vec::new()), + Some(x) => x, + }; + // Silently swallow if pending block can't decode. + let pending_header = pending_header.map(|p| B::Header::decode(&mut &p.0[..]).ok()).flatten(); + let included_hash = included_header.hash(); + let pending_hash = pending_header.as_ref().map(|hdr| hdr.hash()); + + let mut frontier = vec![PotentialParent { + hash: included_hash, + header: included_header, + depth: 0, + descends_from_pending: false, + }]; + + // Recursive search through descendants of the included block which have acceptable + // relay parents. + let mut potential_parents = Vec::new(); + while let Some(entry) = frontier.pop() { + let is_pending = entry.depth == 1 + && pending_hash.as_ref().map_or(false, |h| &entry.hash == h); + let is_included = entry.depth == 0; + + // note: even if the pending block or included block have a relay parent + // outside of the expected part of the relay chain, they are always allowed + // because they have already been posted on chain. 
+ let is_potential = is_pending || is_included || { + let digest = entry.header.digest(); + cumulus_primitives_core::extract_relay_parent(digest) + .map_or(false, is_hash_in_ancestry) || + cumulus_primitives_core::rpsr_digest::extract_relay_parent_storage_root(digest) + .map_or(false, is_root_in_ancestry) + }; + + if is_potential { + potential_parents.push(entry); + } + + if !is_potential || entry.depth + 1 > max_depth { continue } + + // push children onto search frontier. + for child in client.children(entry.hash).ok().flatten().into_iter().flat_map(|c| c) { + if params.ignore_alternative_branches + && is_included + && pending_hash.map_or(false, |h| &child != h) + { continue } + + let header = match client.header(child) { + Ok(Some(h)) => h, + Ok(None) => continue, + Err(_) => continue, + }; + + frontier.push(PotentialParent { + hash: child, + header, + depth: entry.depth + 1, + descends_from_pending: is_pending || entry.descends_from_pending, + }); + } + } + + Ok(potential_parents) +} diff --git a/client/relay-chain-interface/src/lib.rs b/client/relay-chain-interface/src/lib.rs index 3629aea84cd..d25487ee13a 100644 --- a/client/relay-chain-interface/src/lib.rs +++ b/client/relay-chain-interface/src/lib.rs @@ -113,6 +113,9 @@ pub trait RelayChainInterface: Send + Sync { /// Get the hash of the finalized block. async fn finalized_block_hash(&self) -> RelayChainResult; + /// Get a header by hash, if it exists. + async fn header(&self, block_id: PHash) -> RelayChainResult>; + /// Returns the whole contents of the downward message queue for the parachain we are collating /// for. 
/// @@ -260,6 +263,10 @@ where (**self).finalized_block_hash().await } + async fn header(&self, block_id: PHash) -> RelayChainResult> { + (**self).header().await + } + async fn is_major_syncing(&self) -> RelayChainResult { (**self).is_major_syncing().await } From 5766a6a1aacc65875ef1af139a3d2883640ea3f9 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 12 May 2023 23:43:55 -0500 Subject: [PATCH 02/39] get things compiling --- client/consensus/common/src/lib.rs | 34 +++++++++++++++---------- client/relay-chain-interface/src/lib.rs | 2 +- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/client/consensus/common/src/lib.rs b/client/consensus/common/src/lib.rs index c8394dafb96..07fc4678046 100644 --- a/client/consensus/common/src/lib.rs +++ b/client/consensus/common/src/lib.rs @@ -14,9 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . +use codec::Decode; use polkadot_primitives::{Hash as PHash, PersistedValidationData}; -use cumulus_primitives_core::ParaId; +use cumulus_primitives_core::{relay_chain::OccupiedCoreAssumption, ParaId}; +use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface}; use sc_client_api::Backend; use sc_consensus::{shared_data::SharedData, BlockImport, ImportResult}; @@ -224,9 +226,9 @@ pub struct PotentialParent { /// * the block number is within `max_depth` blocks of the included block pub async fn find_potential_parents( params: ParentSearchParams, - client: &C, + client: &impl sp_blockchain::Backend, relay_client: &impl RelayChainInterface, -) -> Result, RelayChainError> { +) -> Result>, RelayChainError> { // 1. Build up the ancestry record of the relay chain to compare against. let rp_ancestry = { let mut ancestry = Vec::with_capacity(params.ancestry_lookback + 1); @@ -241,21 +243,21 @@ pub async fn find_potential_parents( current_rp = header.parent_hash().clone(); // don't iterate back into the genesis block. 
- if header.number == 1u32.into() { break } + if header.number == 1 { break } } - rp_ancestry + ancestry }; let is_hash_in_ancestry = |hash| rp_ancestry.iter().any(|x| x.0 == hash); - let is_root_in_ancestry = |root| rp.ancestry.iter().any(|x| x.1 == root); + let is_root_in_ancestry = |root| rp_ancestry.iter().any(|x| x.1 == root); // 2. Get the included and pending availability blocks. let included_header = relay_client.persisted_validation_data( params.relay_parent, params.para_id, OccupiedCoreAssumption::TimedOut, - )?; + ).await?; let included_header = match included_header { Some(pvd) => pvd.parent_head, @@ -266,7 +268,7 @@ pub async fn find_potential_parents( params.relay_parent, params.para_id, OccupiedCoreAssumption::Included, - )?.and_then(|x| if x.parent_head != included_header { Some(x.parent_head) } else { None }); + ).await?.and_then(|x| if x.parent_head != included_header { Some(x.parent_head) } else { None }); let included_header = match B::Header::decode(&mut &included_header.0[..]).ok() { None => return Ok(Vec::new()), @@ -277,7 +279,7 @@ pub async fn find_potential_parents( let included_hash = included_header.hash(); let pending_hash = pending_header.as_ref().map(|hdr| hdr.hash()); - let mut frontier = vec![PotentialParent { + let mut frontier = vec![PotentialParent:: { hash: included_hash, header: included_header, depth: 0, @@ -303,17 +305,21 @@ pub async fn find_potential_parents( .map_or(false, is_root_in_ancestry) }; + let descends_from_pending = entry.descends_from_pending; + let child_depth = entry.depth + 1; + let hash = entry.hash; + if is_potential { potential_parents.push(entry); } - if !is_potential || entry.depth + 1 > max_depth { continue } + if !is_potential || child_depth > params.max_depth { continue } // push children onto search frontier. 
- for child in client.children(entry.hash).ok().flatten().into_iter().flat_map(|c| c) { + for child in client.children(hash).ok().into_iter().flat_map(|c| c) { if params.ignore_alternative_branches && is_included - && pending_hash.map_or(false, |h| &child != h) + && pending_hash.map_or(false, |h| child != h) { continue } let header = match client.header(child) { @@ -325,8 +331,8 @@ pub async fn find_potential_parents( frontier.push(PotentialParent { hash: child, header, - depth: entry.depth + 1, - descends_from_pending: is_pending || entry.descends_from_pending, + depth: child_depth, + descends_from_pending: is_pending || descends_from_pending, }); } } diff --git a/client/relay-chain-interface/src/lib.rs b/client/relay-chain-interface/src/lib.rs index d25487ee13a..c27814d7d8c 100644 --- a/client/relay-chain-interface/src/lib.rs +++ b/client/relay-chain-interface/src/lib.rs @@ -264,7 +264,7 @@ where } async fn header(&self, block_id: PHash) -> RelayChainResult> { - (**self).header().await + (**self).header(block_id).await } async fn is_major_syncing(&self) -> RelayChainResult { From 03a880a465087b45bbd74842acf9614a2d5e3459 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 12 May 2023 23:44:09 -0500 Subject: [PATCH 03/39] fmt --- client/consensus/common/src/lib.rs | 49 ++++++++++++++++++------------ 1 file changed, 29 insertions(+), 20 deletions(-) diff --git a/client/consensus/common/src/lib.rs b/client/consensus/common/src/lib.rs index 07fc4678046..a366c96f39a 100644 --- a/client/consensus/common/src/lib.rs +++ b/client/consensus/common/src/lib.rs @@ -243,7 +243,9 @@ pub async fn find_potential_parents( current_rp = header.parent_hash().clone(); // don't iterate back into the genesis block. - if header.number == 1 { break } + if header.number == 1 { + break + } } ancestry @@ -253,22 +255,27 @@ pub async fn find_potential_parents( let is_root_in_ancestry = |root| rp_ancestry.iter().any(|x| x.1 == root); // 2. 
Get the included and pending availability blocks. - let included_header = relay_client.persisted_validation_data( - params.relay_parent, - params.para_id, - OccupiedCoreAssumption::TimedOut, - ).await?; + let included_header = relay_client + .persisted_validation_data( + params.relay_parent, + params.para_id, + OccupiedCoreAssumption::TimedOut, + ) + .await?; let included_header = match included_header { Some(pvd) => pvd.parent_head, None => return Ok(Vec::new()), // this implies the para doesn't exist. }; - let pending_header = relay_client.persisted_validation_data( - params.relay_parent, - params.para_id, - OccupiedCoreAssumption::Included, - ).await?.and_then(|x| if x.parent_head != included_header { Some(x.parent_head) } else { None }); + let pending_header = relay_client + .persisted_validation_data( + params.relay_parent, + params.para_id, + OccupiedCoreAssumption::Included, + ) + .await? + .and_then(|x| if x.parent_head != included_header { Some(x.parent_head) } else { None }); let included_header = match B::Header::decode(&mut &included_header.0[..]).ok() { None => return Ok(Vec::new()), @@ -290,8 +297,8 @@ pub async fn find_potential_parents( // relay parents. let mut potential_parents = Vec::new(); while let Some(entry) = frontier.pop() { - let is_pending = entry.depth == 1 - && pending_hash.as_ref().map_or(false, |h| &entry.hash == h); + let is_pending = + entry.depth == 1 && pending_hash.as_ref().map_or(false, |h| &entry.hash == h); let is_included = entry.depth == 0; // note: even if the pending block or included block have a relay parent @@ -299,8 +306,7 @@ pub async fn find_potential_parents( // because they have already been posted on chain. 
let is_potential = is_pending || is_included || { let digest = entry.header.digest(); - cumulus_primitives_core::extract_relay_parent(digest) - .map_or(false, is_hash_in_ancestry) || + cumulus_primitives_core::extract_relay_parent(digest).map_or(false, is_hash_in_ancestry) || cumulus_primitives_core::rpsr_digest::extract_relay_parent_storage_root(digest) .map_or(false, is_root_in_ancestry) }; @@ -313,14 +319,17 @@ pub async fn find_potential_parents( potential_parents.push(entry); } - if !is_potential || child_depth > params.max_depth { continue } + if !is_potential || child_depth > params.max_depth { + continue + } // push children onto search frontier. for child in client.children(hash).ok().into_iter().flat_map(|c| c) { - if params.ignore_alternative_branches - && is_included - && pending_hash.map_or(false, |h| child != h) - { continue } + if params.ignore_alternative_branches && + is_included && pending_hash.map_or(false, |h| child != h) + { + continue + } let header = match client.header(child) { Ok(Some(h)) => h, From bbce10e05afda255376bdf9d6ad28b785065e9ff Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 13 May 2023 00:04:12 -0500 Subject: [PATCH 04/39] add new function to all RelayChainInterface implementations --- client/consensus/common/src/tests.rs | 5 ++++- client/network/src/tests.rs | 4 ++++ client/relay-chain-inprocess-interface/src/lib.rs | 4 ++++ client/relay-chain-rpc-interface/src/lib.rs | 4 ++++ 4 files changed, 16 insertions(+), 1 deletion(-) diff --git a/client/consensus/common/src/tests.rs b/client/consensus/common/src/tests.rs index f1bc4d42b8a..6e65f4bd11b 100644 --- a/client/consensus/common/src/tests.rs +++ b/client/consensus/common/src/tests.rs @@ -186,7 +186,6 @@ impl RelayChainInterface for Relaychain { } async fn wait_for_block(&self, _: PHash) -> RelayChainResult<()> { - unimplemented!("Not needed for test") } async fn new_best_notification_stream( @@ -207,6 +206,10 @@ impl RelayChainInterface for Relaychain { }) 
.boxed()) } + + async fn header(&self, block_id: PHash) -> RelayChainResult> { + unimplemented!("Not needed for test") + } } fn build_block( diff --git a/client/network/src/tests.rs b/client/network/src/tests.rs index 08127fe390a..f0e34a0fbe6 100644 --- a/client/network/src/tests.rs +++ b/client/network/src/tests.rs @@ -237,6 +237,10 @@ impl RelayChainInterface for DummyRelayChainInterface { }); Ok(Box::pin(notifications_stream)) } + + async fn header(&self, block_id: PHash) -> RelayChainResult> { + unimplemented!("Not needed for test") + } } fn make_validator_and_api( diff --git a/client/relay-chain-inprocess-interface/src/lib.rs b/client/relay-chain-inprocess-interface/src/lib.rs index c1e19bd20b6..627b2b97992 100644 --- a/client/relay-chain-inprocess-interface/src/lib.rs +++ b/client/relay-chain-inprocess-interface/src/lib.rs @@ -171,6 +171,10 @@ where Ok(self.sync_oracle.is_major_syncing()) } + async fn header(&self, block_id: PHash) -> RelayChainResult> { + Ok(self.backend.header(block_hash)?) 
+ } + fn overseer_handle(&self) -> RelayChainResult { Ok(self.overseer_handle.clone()) } diff --git a/client/relay-chain-rpc-interface/src/lib.rs b/client/relay-chain-rpc-interface/src/lib.rs index 475d5d905b6..4db7ea55dde 100644 --- a/client/relay-chain-rpc-interface/src/lib.rs +++ b/client/relay-chain-rpc-interface/src/lib.rs @@ -132,6 +132,10 @@ impl RelayChainInterface for RelayChainRpcInterface { self.rpc_client.system_health().await.map(|h| h.is_syncing) } + async fn header(&self, block_id: RelayHash) -> RelayChainResult> { + self.rpc_client.chain_get_header(Some(block_id)).await + } + fn overseer_handle(&self) -> RelayChainResult { Ok(self.overseer_handle.clone()) } From db7f9a564b98add0db13c3889dc1a767e9ef4fef Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 15 May 2023 15:51:31 -0400 Subject: [PATCH 05/39] fix compilation --- client/consensus/common/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/client/consensus/common/src/lib.rs b/client/consensus/common/src/lib.rs index a366c96f39a..22b783fbb51 100644 --- a/client/consensus/common/src/lib.rs +++ b/client/consensus/common/src/lib.rs @@ -308,6 +308,7 @@ pub async fn find_potential_parents( let digest = entry.header.digest(); cumulus_primitives_core::extract_relay_parent(digest).map_or(false, is_hash_in_ancestry) || cumulus_primitives_core::rpsr_digest::extract_relay_parent_storage_root(digest) + .map(|(r, _n)| r) .map_or(false, is_root_in_ancestry) }; From 832a1c0dcbc932331e48255a2f9b9593bbf4ab2d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 17 May 2023 17:47:23 -0400 Subject: [PATCH 06/39] set slot and timestamp based on relay parent, prepare for find-parent --- Cargo.lock | 1 + client/consensus/aura/Cargo.toml | 1 + client/consensus/aura/src/unstable_reimpl.rs | 173 +++++++++++++------ client/consensus/common/src/tests.rs | 3 +- 4 files changed, 126 insertions(+), 52 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f4f181303b4..271fac694c7 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -2233,6 +2233,7 @@ dependencies = [ "sc-client-api", "sc-consensus", "sc-consensus-aura", + "sc-consensus-babe", "sc-consensus-slots", "sc-telemetry", "sp-api", diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index 114e2ebed5b..7d6da6a09da 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -15,6 +15,7 @@ tracing = "0.1.37" sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/client/consensus/aura/src/unstable_reimpl.rs b/client/consensus/aura/src/unstable_reimpl.rs index f9602a363bf..d1fdc8793dd 100644 --- a/client/consensus/aura/src/unstable_reimpl.rs +++ b/client/consensus/aura/src/unstable_reimpl.rs @@ -34,7 +34,7 @@ use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_node_primitives::{CollationResult, MaybeCompressedPoV}; use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{CollatorPair, Id as ParaId}; +use polkadot_primitives::{Block as PBlock, CollatorPair, Header as PHeader, Id as ParaId}; use futures::prelude::*; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; @@ -58,6 +58,7 @@ use sp_runtime::{ traits::{Block as BlockT, HashFor, Header as HeaderT, Member}, }; use sp_state_machine::StorageChanges; +use sp_timestamp::Timestamp; use std::{convert::TryFrom, error::Error, fmt::Debug, hash::Hash, sync::Arc, time::Duration}; /// Parameters for 
[`run_bare_relay_driven`]. @@ -72,10 +73,57 @@ pub struct Params { pub para_id: ParaId, pub overseer_handle: OverseerHandle, pub slot_duration: SlotDuration, + pub relay_chain_slot_duration: SlotDuration, pub proposer: Proposer, pub collator_service: CS, } +/// Run async-backing-friendly Aura. +pub async fn run_async_backing_driven( + params: Params, +) where + Block: BlockT, + Client: ProvideRuntimeApi + + BlockOf + + AuxStore + + HeaderBackend + + BlockBackend + + Send + + Sync + + 'static, + Client::Api: AuraApi + CollectCollationInfo, + RClient: RelayChainInterface, + CIDP: CreateInherentDataProviders + 'static, + BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, + SO: SyncOracle + Send + Sync + Clone + 'static, + Proposer: ProposerInterface, + Proposer::Transaction: Sync, + CS: CollatorServiceInterface, + P: Pair + Send + Sync, + P::Public: AppPublic + Hash + Member + Encode + Decode, + P::Signature: TryFrom> + Hash + Member + Encode + Decode, +{ + let mut proposer = params.proposer; + let mut block_import = params.block_import; + + let mut import_notifications = match params.relay_client.import_notification_stream().await { + Ok(s) => s, + Err(err) => { + tracing::error!( + target: crate::LOG_TARGET, + ?err, + "Failed to initialize consensus: no relay chain import notification stream" + ); + + return + }, + }; + + while let Some(relay_parent_header) = import_notifications.next().await { + let relay_parent = relay_parent_header.hash(); + } +} + /// Run bare Aura consensus as a relay-chain-driven collator. 
pub async fn run_bare_relay_driven( params: Params, @@ -120,12 +168,19 @@ pub async fn run_bare_relay_driven {{ + match $x { + Ok(x) => x, + Err(e) => reject_with_error!(e), + } + }}; + } + let validation_data = request.persisted_validation_data(); - let parent_header = match Block::Header::decode(&mut &validation_data.parent_head.0[..]) { - Ok(x) => x, - Err(e) => reject_with_error!(e), - }; + let parent_header = + try_request!(Block::Header::decode(&mut &validation_data.parent_head.0[..])); let parent_hash = parent_header.hash(); @@ -133,10 +188,18 @@ pub async fn run_bare_relay_driven reject_with_error!(e), + Ok(None) => continue, // sanity: would be inconsistent to get `None` here + Ok(Some(h)) => h, + }; + let claim = match claim_slot::<_, _, P>( &*params.para_client, parent_hash, + &relay_parent_header, params.slot_duration, + params.relay_chain_slot_duration, ¶ms.keystore, ) .await @@ -146,51 +209,45 @@ pub async fn run_bare_relay_driven reject_with_error!(e), }; - let (parachain_inherent_data, other_inherent_data) = match create_inherent_data( - *request.relay_parent(), - &validation_data, - parent_hash, - params.para_id, - ¶ms.relay_client, - ¶ms.create_inherent_data_providers, - ) - .await - { - Ok(x) => x, - Err(e) => reject_with_error!(e), - }; - - let proposal = match proposer - .propose( - &parent_header, - ¶chain_inherent_data, - other_inherent_data, - Digest { logs: vec![claim.pre_digest] }, - // TODO [https://github.com/paritytech/cumulus/issues/2439] - // We should call out to a pluggable interface that provides - // the proposal duration. - Duration::from_millis(500), - // Set the block limit to 50% of the maximum PoV size. - // - // TODO: If we got benchmarking that includes the proof size, - // we should be able to use the maximum pov size. 
- Some((validation_data.max_pov_size / 2) as usize), + let (parachain_inherent_data, other_inherent_data) = try_request!( + create_inherent_data( + *request.relay_parent(), + &validation_data, + parent_hash, + params.para_id, + claim.timestamp, + ¶ms.relay_client, + ¶ms.create_inherent_data_providers, ) .await - { - Ok(p) => p, - Err(e) => reject_with_error!(e), - }; + ); + + let proposal = try_request!( + proposer + .propose( + &parent_header, + ¶chain_inherent_data, + other_inherent_data, + Digest { logs: vec![claim.pre_digest] }, + // TODO [https://github.com/paritytech/cumulus/issues/2439] + // We should call out to a pluggable interface that provides + // the proposal duration. + Duration::from_millis(500), + // Set the block limit to 50% of the maximum PoV size. + // + // TODO: If we got benchmarking that includes the proof size, + // we should be able to use the maximum pov size. + Some((validation_data.max_pov_size / 2) as usize), + ) + .await + ); - let sealed_importable = match seal::<_, _, P>( + let sealed_importable = try_request!(seal::<_, _, P>( proposal.block, proposal.storage_changes, &claim.author_pub, ¶ms.keystore, - ) { - Ok(s) => s, - Err(e) => reject_with_error!(e), - }; + )); let post_hash = sealed_importable.post_hash(); let block = Block::new( @@ -202,9 +259,7 @@ pub async fn run_bare_relay_driven Slot { struct SlotClaim { author_pub: Pub, pre_digest: sp_runtime::DigestItem, + timestamp: Timestamp, } async fn claim_slot( client: &C, parent_hash: B::Hash, + relay_parent_header: &PHeader, slot_duration: SlotDuration, + relay_chain_slot_duration: SlotDuration, keystore: &KeystorePtr, ) -> Result>, Box> where @@ -265,8 +323,18 @@ where // load authorities let authorities = client.runtime_api().authorities(parent_hash).map_err(Box::new)?; - // Determine the current slot. - let slot_now = slot_now(slot_duration); + // Determine the current slot and timestamp based on the relay-parent's. 
+ let (slot_now, timestamp) = + match sc_consensus_babe::find_pre_digest::(relay_parent_header) { + Ok(babe_pre_digest) => { + let t = + Timestamp::new(relay_chain_slot_duration.as_millis() * *babe_pre_digest.slot()); + let slot = Slot::from_timestamp(t, slot_duration); + + (slot, t) + }, + Err(_) => return Ok(None), + }; // Try to claim the slot locally. let author_pub = { @@ -280,14 +348,17 @@ where // Produce the pre-digest. let pre_digest = aura_internal::pre_digest::

(slot_now); - Ok(Some(SlotClaim { author_pub, pre_digest })) + Ok(Some(SlotClaim { author_pub, pre_digest, timestamp })) } +// This explicitly creates the inherent data for parachains, as well as overriding the +// timestamp based on the slot number. async fn create_inherent_data( relay_parent: PHash, validation_data: &PersistedValidationData, parent_hash: B::Hash, para_id: ParaId, + timestamp: Timestamp, relay_chain_interface: &impl RelayChainInterface, create_inherent_data_providers: &impl CreateInherentDataProviders, ) -> Result<(ParachainInherentData, InherentData), Box> { @@ -305,7 +376,7 @@ async fn create_inherent_data( return Err(format!("Could not create paras inherent data at {:?}", relay_parent).into()), }; - let other_inherent_data = create_inherent_data_providers + let mut other_inherent_data = create_inherent_data_providers .create_inherent_data_providers(parent_hash, ()) .map_err(|e| e as Box) .await? @@ -313,6 +384,8 @@ async fn create_inherent_data( .await .map_err(Box::new)?; + other_inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, ×tamp); + Ok((paras_inherent_data, other_inherent_data)) } diff --git a/client/consensus/common/src/tests.rs b/client/consensus/common/src/tests.rs index 6e65f4bd11b..f71cbcab89e 100644 --- a/client/consensus/common/src/tests.rs +++ b/client/consensus/common/src/tests.rs @@ -185,8 +185,7 @@ impl RelayChainInterface for Relaychain { unimplemented!("Not needed for test") } - async fn wait_for_block(&self, _: PHash) -> RelayChainResult<()> { - } + async fn wait_for_block(&self, _: PHash) -> RelayChainResult<()> {} async fn new_best_notification_stream( &self, From b17694af854b33fca4e65778a3f3f99289165d43 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 22 May 2023 14:27:42 -0400 Subject: [PATCH 07/39] skeleton of new aura logic --- client/consensus/aura/src/unstable_reimpl.rs | 75 ++++++++++++++++++-- client/consensus/common/src/lib.rs | 3 +- 2 files changed, 70 insertions(+), 8 deletions(-) diff 
--git a/client/consensus/aura/src/unstable_reimpl.rs b/client/consensus/aura/src/unstable_reimpl.rs index d1fdc8793dd..26a5285fb2f 100644 --- a/client/consensus/aura/src/unstable_reimpl.rs +++ b/client/consensus/aura/src/unstable_reimpl.rs @@ -24,7 +24,7 @@ use codec::{Decode, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; -use cumulus_client_consensus_common::{ParachainBlockImportMarker, ParachainCandidate}; +use cumulus_client_consensus_common::{ParachainBlockImportMarker, ParachainCandidate, ParentSearchParams}; use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_core::{ relay_chain::Hash as PHash, CollectCollationInfo, PersistedValidationData, @@ -103,8 +103,7 @@ pub async fn run_async_backing_driven> + Hash + Member + Encode + Decode, { - let mut proposer = params.proposer; - let mut block_import = params.block_import; + let mut params = params; let mut import_notifications = match params.relay_client.import_notification_stream().await { Ok(s) => s, @@ -121,9 +120,72 @@ pub async fn run_async_backing_driven = unimplemented!(); + + let potential_parents = cumulus_client_consensus_common::find_potential_parents::( + parent_search_params, + &fake_hack, // sp_blockchain::Backend + ¶ms.relay_client, + ).await; + + let mut potential_parents = match potential_parents { + Err(e) => { + tracing::error!( + target: crate::LOG_TARGET, + ?relay_parent, + err = ?e, + "Could not fetch potential parents to build upon" + ); + + continue; + } + Ok(x) => x, + }; + + // Sort by depth, descending, to choose the longest chain, and lazily filter + // by those with space. + potential_parents.sort_by(|a, b| b.depth.cmp(&a.depth)); + let potential_parents = potential_parents + .into_iter() + .filter(|p| can_build_upon(p.hash, &*params.para_client)); + + if let Some(parent) = potential_parents.next() { + // TODO [now]: build and announce collations recursively until + // `can_build_upon` fails. 
+ unimplemented!() + } } } +fn can_build_upon( + block_hash: Block::Hash, + client: &Client, +) -> bool where + Client: ProvideRuntimeApi +{ + // TODO [now]: claim slot, maybe with an authorities cache to avoid + // all validators doing this every new relay-chain block. + // Actually, as long as sessions are based on slot number then they should + // be the same for all... + // + // TODO [now]: new runtime API, + // AuraUnincludedSegmentApi::has_space(slot) or something like it. + unimplemented!() +} + /// Run bare Aura consensus as a relay-chain-driven collator. pub async fn run_bare_relay_driven( params: Params, @@ -149,8 +211,7 @@ pub async fn run_bare_relay_driven> + Hash + Member + Encode + Decode, { - let mut proposer = params.proposer; - let mut block_import = params.block_import; + let mut params = params; let mut collation_requests = cumulus_client_collator::relay_chain_driven::init( params.key, @@ -223,7 +284,7 @@ pub async fn run_bare_relay_driven { pub depth: usize, /// Whether the block descends from the block pending availability. /// - /// This is false for the last inclued block as well as the block pending availability itself. + /// This is false for the last included block as well as the block pending availability itself. + // TODO [now]: change this to be true for the pending blocks themselves. 
pub descends_from_pending: bool, } From f70fcf186c3938e0bde002d86160ca2946c4fd72 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 22 May 2023 15:00:24 -0400 Subject: [PATCH 08/39] fmt --- client/consensus/aura/src/lib.rs | 1 + client/consensus/aura/src/unstable_reimpl.rs | 24 +++++++++++--------- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 416ae776da1..1202f05486b 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -50,6 +50,7 @@ pub use import_queue::{build_verifier, import_queue, BuildVerifierParams, Import pub use sc_consensus_aura::{slot_duration, AuraVerifier, BuildAuraWorkerParams, SlotProportion}; pub use sc_consensus_slots::InherentDataProviderExt; +pub mod collator; pub mod unstable_reimpl; const LOG_TARGET: &str = "aura::cumulus"; diff --git a/client/consensus/aura/src/unstable_reimpl.rs b/client/consensus/aura/src/unstable_reimpl.rs index 26a5285fb2f..4875e04425c 100644 --- a/client/consensus/aura/src/unstable_reimpl.rs +++ b/client/consensus/aura/src/unstable_reimpl.rs @@ -24,7 +24,9 @@ use codec::{Decode, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; -use cumulus_client_consensus_common::{ParachainBlockImportMarker, ParachainCandidate, ParentSearchParams}; +use cumulus_client_consensus_common::{ + ParachainBlockImportMarker, ParachainCandidate, ParentSearchParams, +}; use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_core::{ relay_chain::Hash as PHash, CollectCollationInfo, PersistedValidationData, @@ -133,13 +135,14 @@ pub async fn run_async_backing_driven = unimplemented!(); + let fake_hack: sc_client_api::in_mem::Blockchain = unimplemented!(); let potential_parents = cumulus_client_consensus_common::find_potential_parents::( parent_search_params, &fake_hack, // sp_blockchain::Backend ¶ms.relay_client, - ).await; + ) + .await; let mut 
potential_parents = match potential_parents { Err(e) => { @@ -150,8 +153,8 @@ pub async fn run_async_backing_driven x, }; @@ -170,11 +173,9 @@ pub async fn run_async_backing_driven( - block_hash: Block::Hash, - client: &Client, -) -> bool where - Client: ProvideRuntimeApi +fn can_build_upon(block_hash: Block::Hash, client: &Client) -> bool +where + Client: ProvideRuntimeApi, { // TODO [now]: claim slot, maybe with an authorities cache to avoid // all validators doing this every new relay-chain block. @@ -284,7 +285,8 @@ pub async fn run_bare_relay_driven Date: Mon, 22 May 2023 15:06:51 -0400 Subject: [PATCH 09/39] introduce a collator module in the Aura crate --- client/consensus/aura/src/collator.rs | 356 ++++++++++++++++++++++++++ 1 file changed, 356 insertions(+) create mode 100644 client/consensus/aura/src/collator.rs diff --git a/client/consensus/aura/src/collator.rs b/client/consensus/aura/src/collator.rs new file mode 100644 index 00000000000..1194ce694f5 --- /dev/null +++ b/client/consensus/aura/src/collator.rs @@ -0,0 +1,356 @@ +// Copyright 2023 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! The core collator logic for Aura - slot claiming, block proposing, and collation +//! packaging. +//! +//! The [`Collator`] struct exposed here is meant to be a component of higher-level logic +//! 
which actually manages the control flow of the collator - which slots to claim, how +//! many collations to build, when to work, etc. +//! +//! This module also exposes some standalone functions for common operations when building +//! aura-based collators. + +use codec::{Decode, Encode}; +use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; +use cumulus_client_consensus_common::{ParachainBlockImportMarker, ParachainCandidate}; +use cumulus_client_consensus_proposer::ProposerInterface; +use cumulus_primitives_core::{ + relay_chain::Hash as PHash, DigestItem, ParachainBlockData, PersistedValidationData, +}; +use cumulus_primitives_parachain_inherent::ParachainInherentData; +use cumulus_relay_chain_interface::RelayChainInterface; + +use polkadot_node_primitives::{Collation, MaybeCompressedPoV}; +use polkadot_overseer::Handle as OverseerHandle; +use polkadot_primitives::{Block as PBlock, Header as PHeader, Id as ParaId}; + +use futures::prelude::*; +use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction}; +use sc_consensus_aura::standalone as aura_internal; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::AppPublic; +use sp_consensus::BlockOrigin; +use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; +use sp_core::crypto::Pair; +use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; +use sp_keystore::KeystorePtr; +use sp_runtime::{ + generic::Digest, + traits::{Block as BlockT, HashFor, Header as HeaderT, Member}, +}; +use sp_state_machine::StorageChanges; +use sp_timestamp::Timestamp; +use std::{convert::TryFrom, error::Error, fmt::Debug, hash::Hash, sync::Arc, time::Duration}; + +/// Parameters for instantiating a [`Collator`]. +pub struct Params { + /// A builder for inherent data providers. + pub create_inherent_data_providers: CIDP, + /// The block import handle. + pub block_import: BI, + /// An interface to the relay-chain client. 
+ pub relay_client: Arc, + /// The keystore handle used for accessing parachain key material. + pub keystore: KeystorePtr, + /// The identifier of the parachain within the relay-chain. + pub para_id: ParaId, + /// The block proposer used for building blocks. + pub proposer: Proposer, + /// The collator service used for bundling proposals into collations and announcing + /// to the network. + pub collator_service: CS, +} + +/// A utility struct for writing collation logic that makes use of Aura entirely +/// or in part. See module docs for more details. +pub struct Collator { + create_inherent_data_providers: CIDP, + block_import: BI, + relay_client: Arc, + keystore: KeystorePtr, + para_id: ParaId, + proposer: Proposer, + collator_service: CS, + _marker: std::marker::PhantomData<(Block, P)>, +} + +impl Collator +where + Block: BlockT, + RClient: RelayChainInterface, + CIDP: CreateInherentDataProviders + 'static, + BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, + Proposer: ProposerInterface, + Proposer::Transaction: Sync, + CS: CollatorServiceInterface, + P: Pair + Send + Sync, + P::Public: AppPublic + Hash + Member + Encode + Decode, + P::Signature: TryFrom> + Hash + Member + Encode + Decode, +{ + /// Instantiate a new instance of the `Aura` manager. + pub fn new(params: Params) -> Self { + Collator { + create_inherent_data_providers: params.create_inherent_data_providers, + block_import: params.block_import, + relay_client: params.relay_client, + keystore: params.keystore, + para_id: params.para_id, + proposer: params.proposer, + collator_service: params.collator_service, + _marker: std::marker::PhantomData, + } + } + + /// Explicitly creates the inherent data for parachain block authoring and overrides + /// the timestamp inherent data with the one provided, if any. 
+ pub async fn create_inherent_data( + &self, + relay_parent: PHash, + validation_data: &PersistedValidationData, + parent_hash: Block::Hash, + timestamp: Option, + ) -> Result<(ParachainInherentData, InherentData), Box> { + let paras_inherent_data = ParachainInherentData::create_at( + relay_parent, + &self.relay_client, + validation_data, + self.para_id, + ) + .await; + + let paras_inherent_data = match paras_inherent_data { + Some(p) => p, + None => + return Err( + format!("Could not create paras inherent data at {:?}", relay_parent).into() + ), + }; + + let mut other_inherent_data = self + .create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) + .map_err(|e| e as Box) + .await? + .create_inherent_data() + .await + .map_err(Box::new)?; + + if let Some(timestamp) = timestamp { + other_inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, ×tamp); + } + + Ok((paras_inherent_data, other_inherent_data)) + } + + /// Propose, seal, and import a block, packaging it into a collation. + /// + /// Provide the slot to build at as well as any other necessary pre-digest logs, + /// the inherent data, and the proposal duration and PoV size limits. + /// + /// The Aura pre-digest should not be explicitly provided and is set internally. + /// + /// This does not announce the collation to the parachain network or the relay chain. 
+ pub async fn collate( + &mut self, + parent_header: &Block::Header, + slot_claim: &SlotClaim, + pre_digest: impl Into>>, + inherent_data: (ParachainInherentData, InherentData), + proposal_duration: Duration, + max_pov_size: usize, + ) -> Result<(Collation, ParachainBlockData, Block::Hash), Box> { + let mut digest = pre_digest.into().unwrap_or_default(); + digest.push(slot_claim.pre_digest.clone()); + + let proposal = self + .proposer + .propose( + &parent_header, + &inherent_data.0, + inherent_data.1, + Digest { logs: digest }, + proposal_duration, + Some(max_pov_size), + ) + .await + .map_err(|e| Box::new(e))?; + + let sealed_importable = seal::<_, _, P>( + proposal.block, + proposal.storage_changes, + &slot_claim.author_pub, + &self.keystore, + )?; + + let post_hash = sealed_importable.post_hash(); + let block = Block::new( + sealed_importable.post_header(), + sealed_importable + .body + .as_ref() + .expect("body always created with this `propose` fn; qed") + .clone(), + ); + + self.block_import.import_block(sealed_importable).await?; + + if let Some((collation, block_data)) = self.collator_service.build_collation( + parent_header, + post_hash, + ParachainCandidate { block, proof: proposal.proof }, + ) { + tracing::info!( + target: crate::LOG_TARGET, + "PoV size {{ header: {}kb, extrinsics: {}kb, storage_proof: {}kb }}", + block_data.header().encode().len() as f64 / 1024f64, + block_data.extrinsics().encode().len() as f64 / 1024f64, + block_data.storage_proof().encode().len() as f64 / 1024f64, + ); + + if let MaybeCompressedPoV::Compressed(ref pov) = collation.proof_of_validity { + tracing::info!( + target: crate::LOG_TARGET, + "Compressed PoV size: {}kb", + pov.block_data.0.len() as f64 / 1024f64, + ); + } + + Ok((collation, block_data, post_hash)) + } else { + Err(format!("Unable to produce collation").into()) + } + } + + /// Get the underlying collator service. 
+ pub fn collator_service(&self) -> &CS { + &self.collator_service + } +} + +/// A claim on an Aura slot. +pub struct SlotClaim { + author_pub: Pub, + pre_digest: DigestItem, + timestamp: Timestamp, +} + +impl SlotClaim { + /// Get the author's public key. + pub fn author_pub(&self) -> &Pub { + &self.author_pub + } + + /// Get the Aura pre-digest for this slot. + pub fn pre_digest(&self) -> &DigestItem { + &self.pre_digest + } + + /// Get the timestamp corresponding to the relay-chain slot this claim was + /// generated against. + pub fn timestamp(&self) -> Timestamp { + self.timestamp + } +} + +/// Attempt to claim a slot derived from the given relay-parent header's slot. +pub async fn claim_slot( + client: &C, + parent_hash: B::Hash, + relay_parent_header: &PHeader, + slot_duration: SlotDuration, + relay_chain_slot_duration: SlotDuration, + keystore: &KeystorePtr, +) -> Result>, Box> +where + B: BlockT, + C: ProvideRuntimeApi + Send + Sync + 'static, + C::Api: AuraApi, + P: Pair, + P::Public: Encode + Decode, + P::Signature: Encode + Decode, +{ + // load authorities + let authorities = client.runtime_api().authorities(parent_hash).map_err(Box::new)?; + + // Determine the current slot and timestamp based on the relay-parent's. + let (slot_now, timestamp) = + match sc_consensus_babe::find_pre_digest::(relay_parent_header) { + Ok(babe_pre_digest) => { + let t = + Timestamp::new(relay_chain_slot_duration.as_millis() * *babe_pre_digest.slot()); + let slot = Slot::from_timestamp(t, slot_duration); + + (slot, t) + }, + Err(_) => return Ok(None), + }; + + // Try to claim the slot locally. + let author_pub = { + let res = aura_internal::claim_slot::

(slot_now, &authorities, keystore).await; + match res { + Some(p) => p, + None => return Ok(None), + } + }; + + // Produce the pre-digest. + let pre_digest = aura_internal::pre_digest::

(slot_now); + + Ok(Some(SlotClaim { author_pub, pre_digest, timestamp })) +} + +/// Seal a block with a signature in the header. +pub fn seal( + pre_sealed: B, + storage_changes: StorageChanges>, + author_pub: &P::Public, + keystore: &KeystorePtr, +) -> Result, Box> +where + P: Pair, + P::Signature: Encode + Decode + TryFrom>, + P::Public: AppPublic, +{ + let (pre_header, body) = pre_sealed.deconstruct(); + let pre_hash = pre_header.hash(); + let block_number = *pre_header.number(); + + // seal the block. + let block_import_params = { + let seal_digest = + aura_internal::seal::<_, P>(&pre_hash, &author_pub, keystore).map_err(Box::new)?; + let mut block_import_params = BlockImportParams::new(BlockOrigin::Own, pre_header); + block_import_params.post_digests.push(seal_digest); + block_import_params.body = Some(body.clone()); + block_import_params.state_action = + StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); + block_import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + block_import_params + }; + let post_hash = block_import_params.post_hash(); + + tracing::info!( + target: crate::LOG_TARGET, + "🔖 Pre-sealed block for proposal at {}. 
Hash now {:?}, previously {:?}.", + block_number, + post_hash, + pre_hash, + ); + + Ok(block_import_params) +} From 9628e4458480a0ee1efd1a24787a641735a31dac Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 22 May 2023 16:12:05 -0400 Subject: [PATCH 10/39] extract different implementations into own modules --- client/consensus/aura/src/collators/basic.rs | 377 +++++++++++++++ .../consensus/aura/src/collators/lookahead.rs | 182 ++++++++ client/consensus/aura/src/collators/mod.rs | 24 + client/consensus/aura/src/lib.rs | 1 + client/consensus/aura/src/unstable_reimpl.rs | 442 +----------------- 5 files changed, 590 insertions(+), 436 deletions(-) create mode 100644 client/consensus/aura/src/collators/basic.rs create mode 100644 client/consensus/aura/src/collators/lookahead.rs create mode 100644 client/consensus/aura/src/collators/mod.rs diff --git a/client/consensus/aura/src/collators/basic.rs b/client/consensus/aura/src/collators/basic.rs new file mode 100644 index 00000000000..c6f91af637e --- /dev/null +++ b/client/consensus/aura/src/collators/basic.rs @@ -0,0 +1,377 @@ +// Copyright 2023 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! This provides the option to run a basic relay-chain driven Aura implementation +//! +//! For more information about AuRa, the Substrate crate should be checked. 
+ +use codec::{Decode, Encode}; +use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; +use cumulus_client_consensus_common::{ + ParachainBlockImportMarker, ParachainCandidate, ParentSearchParams, +}; +use cumulus_client_consensus_proposer::ProposerInterface; +use cumulus_primitives_core::{ + relay_chain::Hash as PHash, CollectCollationInfo, PersistedValidationData, +}; +use cumulus_primitives_parachain_inherent::ParachainInherentData; +use cumulus_relay_chain_interface::RelayChainInterface; + +use polkadot_node_primitives::{CollationResult, MaybeCompressedPoV}; +use polkadot_overseer::Handle as OverseerHandle; +use polkadot_primitives::{Block as PBlock, CollatorPair, Header as PHeader, Id as ParaId}; + +use futures::prelude::*; +use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; +use sc_consensus::{ + import_queue::{BasicQueue, Verifier as VerifierT}, + BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction, +}; +use sc_consensus_aura::standalone as aura_internal; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::AppPublic; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_blockchain::HeaderBackend; +use sp_consensus::{error::Error as ConsensusError, BlockOrigin, SyncOracle}; +use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; +use sp_core::crypto::Pair; +use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; +use sp_keystore::KeystorePtr; +use sp_runtime::{ + generic::Digest, + traits::{Block as BlockT, HashFor, Header as HeaderT, Member}, +}; +use sp_state_machine::StorageChanges; +use sp_timestamp::Timestamp; +use std::{convert::TryFrom, error::Error, fmt::Debug, hash::Hash, sync::Arc, time::Duration}; + +/// Parameters for [`run`]. 
+pub struct Params { + pub create_inherent_data_providers: CIDP, + pub block_import: BI, + pub para_client: Arc, + pub relay_client: Arc, + pub sync_oracle: SO, + pub keystore: KeystorePtr, + pub key: CollatorPair, + pub para_id: ParaId, + pub overseer_handle: OverseerHandle, + pub slot_duration: SlotDuration, + pub relay_chain_slot_duration: SlotDuration, + pub proposer: Proposer, + pub collator_service: CS, +} + +/// Run bare Aura consensus as a relay-chain-driven collator. +pub async fn run( + params: Params, +) where + Block: BlockT, + Client: ProvideRuntimeApi + + BlockOf + + AuxStore + + HeaderBackend + + BlockBackend + + Send + + Sync + + 'static, + Client::Api: AuraApi + CollectCollationInfo, + RClient: RelayChainInterface, + CIDP: CreateInherentDataProviders + 'static, + BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, + SO: SyncOracle + Send + Sync + Clone + 'static, + Proposer: ProposerInterface, + Proposer::Transaction: Sync, + CS: CollatorServiceInterface, + P: Pair + Send + Sync, + P::Public: AppPublic + Hash + Member + Encode + Decode, + P::Signature: TryFrom> + Hash + Member + Encode + Decode, +{ + let mut params = params; + + let mut collation_requests = cumulus_client_collator::relay_chain_driven::init( + params.key, + params.para_id, + params.overseer_handle, + ) + .await; + + while let Some(request) = collation_requests.next().await { + macro_rules! reject_with_error { + ($err:expr) => {{ + request.complete(None); + tracing::error!(target: crate::LOG_TARGET, err = ?{ $err }); + continue; + }}; + } + + macro_rules! 
try_request { + ($x:expr) => {{ + match $x { + Ok(x) => x, + Err(e) => reject_with_error!(e), + } + }}; + } + + let validation_data = request.persisted_validation_data(); + + let parent_header = + try_request!(Block::Header::decode(&mut &validation_data.parent_head.0[..])); + + let parent_hash = parent_header.hash(); + + if !params.collator_service.check_block_status(parent_hash, &parent_header) { + continue + } + + let relay_parent_header = match params.relay_client.header(*request.relay_parent()).await { + Err(e) => reject_with_error!(e), + Ok(None) => continue, // sanity: would be inconsistent to get `None` here + Ok(Some(h)) => h, + }; + + let claim = match claim_slot::<_, _, P>( + &*params.para_client, + parent_hash, + &relay_parent_header, + params.slot_duration, + params.relay_chain_slot_duration, + ¶ms.keystore, + ) + .await + { + Ok(None) => continue, + Ok(Some(c)) => c, + Err(e) => reject_with_error!(e), + }; + + let (parachain_inherent_data, other_inherent_data) = try_request!( + create_inherent_data( + *request.relay_parent(), + &validation_data, + parent_hash, + params.para_id, + claim.timestamp, + ¶ms.relay_client, + ¶ms.create_inherent_data_providers, + ) + .await + ); + + let proposal = try_request!( + params + .proposer + .propose( + &parent_header, + ¶chain_inherent_data, + other_inherent_data, + Digest { logs: vec![claim.pre_digest] }, + // TODO [https://github.com/paritytech/cumulus/issues/2439] + // We should call out to a pluggable interface that provides + // the proposal duration. + Duration::from_millis(500), + // Set the block limit to 50% of the maximum PoV size. + // + // TODO: If we got benchmarking that includes the proof size, + // we should be able to use the maximum pov size. 
+ Some((validation_data.max_pov_size / 2) as usize), + ) + .await + ); + + let sealed_importable = try_request!(seal::<_, _, P>( + proposal.block, + proposal.storage_changes, + &claim.author_pub, + ¶ms.keystore, + )); + + let post_hash = sealed_importable.post_hash(); + let block = Block::new( + sealed_importable.post_header(), + sealed_importable + .body + .as_ref() + .expect("body always created with this `propose` fn; qed") + .clone(), + ); + + try_request!(params.block_import.import_block(sealed_importable).await); + + let response = if let Some((collation, b)) = params.collator_service.build_collation( + &parent_header, + post_hash, + ParachainCandidate { block, proof: proposal.proof }, + ) { + tracing::info!( + target: crate::LOG_TARGET, + "PoV size {{ header: {}kb, extrinsics: {}kb, storage_proof: {}kb }}", + b.header().encode().len() as f64 / 1024f64, + b.extrinsics().encode().len() as f64 / 1024f64, + b.storage_proof().encode().len() as f64 / 1024f64, + ); + + if let MaybeCompressedPoV::Compressed(ref pov) = collation.proof_of_validity { + tracing::info!( + target: crate::LOG_TARGET, + "Compressed PoV size: {}kb", + pov.block_data.0.len() as f64 / 1024f64, + ); + } + + let result_sender = params.collator_service.announce_with_barrier(post_hash); + Some(CollationResult { collation, result_sender: Some(result_sender) }) + } else { + None + }; + + request.complete(response); + } +} + +/// A claim on an Aura slot. 
+struct SlotClaim { + author_pub: Pub, + pre_digest: sp_runtime::DigestItem, + timestamp: Timestamp, +} + +async fn claim_slot( + client: &C, + parent_hash: B::Hash, + relay_parent_header: &PHeader, + slot_duration: SlotDuration, + relay_chain_slot_duration: SlotDuration, + keystore: &KeystorePtr, +) -> Result>, Box> +where + B: BlockT, + C: ProvideRuntimeApi + Send + Sync + 'static, + C::Api: AuraApi, + P: Pair, + P::Public: Encode + Decode, + P::Signature: Encode + Decode, +{ + // load authorities + let authorities = client.runtime_api().authorities(parent_hash).map_err(Box::new)?; + + // Determine the current slot and timestamp based on the relay-parent's. + let (slot_now, timestamp) = + match sc_consensus_babe::find_pre_digest::(relay_parent_header) { + Ok(babe_pre_digest) => { + let t = + Timestamp::new(relay_chain_slot_duration.as_millis() * *babe_pre_digest.slot()); + let slot = Slot::from_timestamp(t, slot_duration); + + (slot, t) + }, + Err(_) => return Ok(None), + }; + + // Try to claim the slot locally. + let author_pub = { + let res = aura_internal::claim_slot::

(slot_now, &authorities, keystore).await; + match res { + Some(p) => p, + None => return Ok(None), + } + }; + + // Produce the pre-digest. + let pre_digest = aura_internal::pre_digest::

(slot_now); + + Ok(Some(SlotClaim { author_pub, pre_digest, timestamp })) +} + +// This explicitly creates the inherent data for parachains, as well as overriding the +// timestamp based on the slot number. +async fn create_inherent_data( + relay_parent: PHash, + validation_data: &PersistedValidationData, + parent_hash: B::Hash, + para_id: ParaId, + timestamp: Timestamp, + relay_chain_interface: &impl RelayChainInterface, + create_inherent_data_providers: &impl CreateInherentDataProviders, +) -> Result<(ParachainInherentData, InherentData), Box> { + let paras_inherent_data = ParachainInherentData::create_at( + relay_parent, + relay_chain_interface, + validation_data, + para_id, + ) + .await; + + let paras_inherent_data = match paras_inherent_data { + Some(p) => p, + None => + return Err(format!("Could not create paras inherent data at {:?}", relay_parent).into()), + }; + + let mut other_inherent_data = create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) + .map_err(|e| e as Box) + .await? + .create_inherent_data() + .await + .map_err(Box::new)?; + + other_inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, ×tamp); + + Ok((paras_inherent_data, other_inherent_data)) +} + +fn seal( + pre_sealed: B, + storage_changes: StorageChanges>, + author_pub: &P::Public, + keystore: &KeystorePtr, +) -> Result, Box> +where + P: Pair, + P::Signature: Encode + Decode + TryFrom>, + P::Public: AppPublic, +{ + let (pre_header, body) = pre_sealed.deconstruct(); + let pre_hash = pre_header.hash(); + let block_number = *pre_header.number(); + + // seal the block. 
+ let block_import_params = { + let seal_digest = + aura_internal::seal::<_, P>(&pre_hash, &author_pub, keystore).map_err(Box::new)?; + let mut block_import_params = BlockImportParams::new(BlockOrigin::Own, pre_header); + block_import_params.post_digests.push(seal_digest); + block_import_params.body = Some(body.clone()); + block_import_params.state_action = + StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); + block_import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + block_import_params + }; + let post_hash = block_import_params.post_hash(); + + tracing::info!( + target: crate::LOG_TARGET, + "🔖 Pre-sealed block for proposal at {}. Hash now {:?}, previously {:?}.", + block_number, + post_hash, + pre_hash, + ); + + Ok(block_import_params) +} diff --git a/client/consensus/aura/src/collators/lookahead.rs b/client/consensus/aura/src/collators/lookahead.rs new file mode 100644 index 00000000000..74aecf7bacd --- /dev/null +++ b/client/consensus/aura/src/collators/lookahead.rs @@ -0,0 +1,182 @@ +// Copyright 2023 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . 
+ +// TODO [now]: docs + +use codec::{Decode, Encode}; +use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; +use cumulus_client_consensus_common::{ + ParachainBlockImportMarker, ParachainCandidate, ParentSearchParams, +}; +use cumulus_client_consensus_proposer::ProposerInterface; +use cumulus_primitives_core::{ + relay_chain::Hash as PHash, CollectCollationInfo, PersistedValidationData, +}; +use cumulus_primitives_parachain_inherent::ParachainInherentData; +use cumulus_relay_chain_interface::RelayChainInterface; + +use polkadot_node_primitives::{CollationResult, MaybeCompressedPoV}; +use polkadot_overseer::Handle as OverseerHandle; +use polkadot_primitives::{Block as PBlock, CollatorPair, Header as PHeader, Id as ParaId}; + +use futures::prelude::*; +use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; +use sc_consensus::{ + import_queue::{BasicQueue, Verifier as VerifierT}, + BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction, +}; +use sc_consensus_aura::standalone as aura_internal; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::AppPublic; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_blockchain::HeaderBackend; +use sp_consensus::{error::Error as ConsensusError, BlockOrigin, SyncOracle}; +use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; +use sp_core::crypto::Pair; +use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; +use sp_keystore::KeystorePtr; +use sp_runtime::{ + generic::Digest, + traits::{Block as BlockT, HashFor, Header as HeaderT, Member}, +}; +use sp_state_machine::StorageChanges; +use sp_timestamp::Timestamp; +use std::{convert::TryFrom, error::Error, fmt::Debug, hash::Hash, sync::Arc, time::Duration}; + +/// Parameters for [`run`]. 
+pub struct Params { + pub create_inherent_data_providers: CIDP, + pub block_import: BI, + pub para_client: Arc, + pub relay_client: Arc, + pub sync_oracle: SO, + pub keystore: KeystorePtr, + pub key: CollatorPair, + pub para_id: ParaId, + pub overseer_handle: OverseerHandle, + pub slot_duration: SlotDuration, + pub relay_chain_slot_duration: SlotDuration, + pub proposer: Proposer, + pub collator_service: CS, +} + +/// Run async-backing-friendly Aura. +pub async fn run( + params: Params, +) where + Block: BlockT, + Client: ProvideRuntimeApi + + BlockOf + + AuxStore + + HeaderBackend + + BlockBackend + + Send + + Sync + + 'static, + Client::Api: AuraApi + CollectCollationInfo, + RClient: RelayChainInterface, + CIDP: CreateInherentDataProviders + 'static, + BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, + SO: SyncOracle + Send + Sync + Clone + 'static, + Proposer: ProposerInterface, + Proposer::Transaction: Sync, + CS: CollatorServiceInterface, + P: Pair + Send + Sync, + P::Public: AppPublic + Hash + Member + Encode + Decode, + P::Signature: TryFrom> + Hash + Member + Encode + Decode, +{ + let mut params = params; + + let mut import_notifications = match params.relay_client.import_notification_stream().await { + Ok(s) => s, + Err(err) => { + tracing::error!( + target: crate::LOG_TARGET, + ?err, + "Failed to initialize consensus: no relay chain import notification stream" + ); + + return + }, + }; + + while let Some(relay_parent_header) = import_notifications.next().await { + let relay_parent = relay_parent_header.hash(); + + // TODO [now]: get asynchronous backing parameters from the relay-chain + // runtime. + + let parent_search_params = ParentSearchParams { + relay_parent, + para_id: params.para_id, + ancestry_lookback: unimplemented!(), + max_depth: unimplemented!(), // max unincluded segment len + ignore_alternative_branches: true, + }; + + // TODO [now]: remove this in favor of one passed in as a parameter. 
+ let fake_hack: sc_client_api::in_mem::Blockchain = unimplemented!(); + + let potential_parents = cumulus_client_consensus_common::find_potential_parents::( + parent_search_params, + &fake_hack, // sp_blockchain::Backend + ¶ms.relay_client, + ) + .await; + + let mut potential_parents = match potential_parents { + Err(e) => { + tracing::error!( + target: crate::LOG_TARGET, + ?relay_parent, + err = ?e, + "Could not fetch potential parents to build upon" + ); + + continue + }, + Ok(x) => x, + }; + + // Sort by depth, descending, to choose the longest chain, and lazily filter + // by those with space. + potential_parents.sort_by(|a, b| b.depth.cmp(&a.depth)); + let potential_parents = potential_parents + .into_iter() + .filter(|p| can_build_upon(p.hash, &*params.para_client)); + + if let Some(parent) = potential_parents.next() { + // TODO [now]: build and announce collations recursively until + // `can_build_upon` fails. + unimplemented!() + } + } +} + +fn can_build_upon(block_hash: Block::Hash, client: &Client) -> bool +where + Client: ProvideRuntimeApi, +{ + // TODO [now]: claim slot, maybe with an authorities cache to avoid + // all validators doing this every new relay-chain block. + // Actually, as long as sessions are based on slot number then they should + // be the same for all... + // + // TODO [now]: new runtime API, + // AuraUnincludedSegmentApi::has_space(slot) or something like it. + unimplemented!() +} diff --git a/client/consensus/aura/src/collators/mod.rs b/client/consensus/aura/src/collators/mod.rs new file mode 100644 index 00000000000..55128dfdc85 --- /dev/null +++ b/client/consensus/aura/src/collators/mod.rs @@ -0,0 +1,24 @@ +// Copyright 2023 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+ +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Stock, pure Aura collators. +//! +//! This includes the [`basic`] collator, which only builds on top of the most recently +//! included parachain block, as well as the [`lookahead`] collator, which prospectively +//! builds on parachain blocks which have not yet been included in the relay chain. + +pub mod basic; +pub mod lookahead; diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 1202f05486b..53b61a56355 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -51,6 +51,7 @@ pub use sc_consensus_aura::{slot_duration, AuraVerifier, BuildAuraWorkerParams, pub use sc_consensus_slots::InherentDataProviderExt; pub mod collator; +pub mod collators; pub mod unstable_reimpl; const LOG_TARGET: &str = "aura::cumulus"; diff --git a/client/consensus/aura/src/unstable_reimpl.rs b/client/consensus/aura/src/unstable_reimpl.rs index 4875e04425c..ceb0e42bf88 100644 --- a/client/consensus/aura/src/unstable_reimpl.rs +++ b/client/consensus/aura/src/unstable_reimpl.rs @@ -14,13 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -//! The AuRa consensus algorithm for parachains. -//! -//! This extends the Substrate provided AuRa consensus implementation to make it compatible for -//! parachains. 
This provides the option to run a "bare" relay-chain driven Aura implementation, -//! but also exposes the core functionalities separately to be composed into more complex implementations. -//! -//! For more information about AuRa, the Substrate crate should be checked. +// TODO [now]: docs + rename file here use codec::{Decode, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; @@ -63,435 +57,6 @@ use sp_state_machine::StorageChanges; use sp_timestamp::Timestamp; use std::{convert::TryFrom, error::Error, fmt::Debug, hash::Hash, sync::Arc, time::Duration}; -/// Parameters for [`run_bare_relay_driven`]. -pub struct Params { - pub create_inherent_data_providers: CIDP, - pub block_import: BI, - pub para_client: Arc, - pub relay_client: Arc, - pub sync_oracle: SO, - pub keystore: KeystorePtr, - pub key: CollatorPair, - pub para_id: ParaId, - pub overseer_handle: OverseerHandle, - pub slot_duration: SlotDuration, - pub relay_chain_slot_duration: SlotDuration, - pub proposer: Proposer, - pub collator_service: CS, -} - -/// Run async-backing-friendly Aura. 
-pub async fn run_async_backing_driven( - params: Params, -) where - Block: BlockT, - Client: ProvideRuntimeApi - + BlockOf - + AuxStore - + HeaderBackend - + BlockBackend - + Send - + Sync - + 'static, - Client::Api: AuraApi + CollectCollationInfo, - RClient: RelayChainInterface, - CIDP: CreateInherentDataProviders + 'static, - BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - SO: SyncOracle + Send + Sync + Clone + 'static, - Proposer: ProposerInterface, - Proposer::Transaction: Sync, - CS: CollatorServiceInterface, - P: Pair + Send + Sync, - P::Public: AppPublic + Hash + Member + Encode + Decode, - P::Signature: TryFrom> + Hash + Member + Encode + Decode, -{ - let mut params = params; - - let mut import_notifications = match params.relay_client.import_notification_stream().await { - Ok(s) => s, - Err(err) => { - tracing::error!( - target: crate::LOG_TARGET, - ?err, - "Failed to initialize consensus: no relay chain import notification stream" - ); - - return - }, - }; - - while let Some(relay_parent_header) = import_notifications.next().await { - let relay_parent = relay_parent_header.hash(); - - // TODO [now]: get asynchronous backing parameters from the relay-chain - // runtime. - - let parent_search_params = ParentSearchParams { - relay_parent, - para_id: params.para_id, - ancestry_lookback: unimplemented!(), - max_depth: unimplemented!(), // max unincluded segment len - ignore_alternative_branches: true, - }; - - // TODO [now]: remove this in favor of one passed in as a parameter. 
- let fake_hack: sc_client_api::in_mem::Blockchain = unimplemented!(); - - let potential_parents = cumulus_client_consensus_common::find_potential_parents::( - parent_search_params, - &fake_hack, // sp_blockchain::Backend - ¶ms.relay_client, - ) - .await; - - let mut potential_parents = match potential_parents { - Err(e) => { - tracing::error!( - target: crate::LOG_TARGET, - ?relay_parent, - err = ?e, - "Could not fetch potential parents to build upon" - ); - - continue - }, - Ok(x) => x, - }; - - // Sort by depth, descending, to choose the longest chain, and lazily filter - // by those with space. - potential_parents.sort_by(|a, b| b.depth.cmp(&a.depth)); - let potential_parents = potential_parents - .into_iter() - .filter(|p| can_build_upon(p.hash, &*params.para_client)); - - if let Some(parent) = potential_parents.next() { - // TODO [now]: build and announce collations recursively until - // `can_build_upon` fails. - unimplemented!() - } - } -} - -fn can_build_upon(block_hash: Block::Hash, client: &Client) -> bool -where - Client: ProvideRuntimeApi, -{ - // TODO [now]: claim slot, maybe with an authorities cache to avoid - // all validators doing this every new relay-chain block. - // Actually, as long as sessions are based on slot number then they should - // be the same for all... - // - // TODO [now]: new runtime API, - // AuraUnincludedSegmentApi::has_space(slot) or something like it. - unimplemented!() -} - -/// Run bare Aura consensus as a relay-chain-driven collator. 
-pub async fn run_bare_relay_driven( - params: Params, -) where - Block: BlockT, - Client: ProvideRuntimeApi - + BlockOf - + AuxStore - + HeaderBackend - + BlockBackend - + Send - + Sync - + 'static, - Client::Api: AuraApi + CollectCollationInfo, - RClient: RelayChainInterface, - CIDP: CreateInherentDataProviders + 'static, - BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - SO: SyncOracle + Send + Sync + Clone + 'static, - Proposer: ProposerInterface, - Proposer::Transaction: Sync, - CS: CollatorServiceInterface, - P: Pair + Send + Sync, - P::Public: AppPublic + Hash + Member + Encode + Decode, - P::Signature: TryFrom> + Hash + Member + Encode + Decode, -{ - let mut params = params; - - let mut collation_requests = cumulus_client_collator::relay_chain_driven::init( - params.key, - params.para_id, - params.overseer_handle, - ) - .await; - - while let Some(request) = collation_requests.next().await { - macro_rules! reject_with_error { - ($err:expr) => {{ - request.complete(None); - tracing::error!(target: crate::LOG_TARGET, err = ?{ $err }); - continue; - }}; - } - - macro_rules! 
try_request { - ($x:expr) => {{ - match $x { - Ok(x) => x, - Err(e) => reject_with_error!(e), - } - }}; - } - - let validation_data = request.persisted_validation_data(); - - let parent_header = - try_request!(Block::Header::decode(&mut &validation_data.parent_head.0[..])); - - let parent_hash = parent_header.hash(); - - if !params.collator_service.check_block_status(parent_hash, &parent_header) { - continue - } - - let relay_parent_header = match params.relay_client.header(*request.relay_parent()).await { - Err(e) => reject_with_error!(e), - Ok(None) => continue, // sanity: would be inconsistent to get `None` here - Ok(Some(h)) => h, - }; - - let claim = match claim_slot::<_, _, P>( - &*params.para_client, - parent_hash, - &relay_parent_header, - params.slot_duration, - params.relay_chain_slot_duration, - ¶ms.keystore, - ) - .await - { - Ok(None) => continue, - Ok(Some(c)) => c, - Err(e) => reject_with_error!(e), - }; - - let (parachain_inherent_data, other_inherent_data) = try_request!( - create_inherent_data( - *request.relay_parent(), - &validation_data, - parent_hash, - params.para_id, - claim.timestamp, - ¶ms.relay_client, - ¶ms.create_inherent_data_providers, - ) - .await - ); - - let proposal = try_request!( - params - .proposer - .propose( - &parent_header, - ¶chain_inherent_data, - other_inherent_data, - Digest { logs: vec![claim.pre_digest] }, - // TODO [https://github.com/paritytech/cumulus/issues/2439] - // We should call out to a pluggable interface that provides - // the proposal duration. - Duration::from_millis(500), - // Set the block limit to 50% of the maximum PoV size. - // - // TODO: If we got benchmarking that includes the proof size, - // we should be able to use the maximum pov size. 
- Some((validation_data.max_pov_size / 2) as usize), - ) - .await - ); - - let sealed_importable = try_request!(seal::<_, _, P>( - proposal.block, - proposal.storage_changes, - &claim.author_pub, - ¶ms.keystore, - )); - - let post_hash = sealed_importable.post_hash(); - let block = Block::new( - sealed_importable.post_header(), - sealed_importable - .body - .as_ref() - .expect("body always created with this `propose` fn; qed") - .clone(), - ); - - try_request!(params.block_import.import_block(sealed_importable).await); - - let response = if let Some((collation, b)) = params.collator_service.build_collation( - &parent_header, - post_hash, - ParachainCandidate { block, proof: proposal.proof }, - ) { - tracing::info!( - target: crate::LOG_TARGET, - "PoV size {{ header: {}kb, extrinsics: {}kb, storage_proof: {}kb }}", - b.header().encode().len() as f64 / 1024f64, - b.extrinsics().encode().len() as f64 / 1024f64, - b.storage_proof().encode().len() as f64 / 1024f64, - ); - - if let MaybeCompressedPoV::Compressed(ref pov) = collation.proof_of_validity { - tracing::info!( - target: crate::LOG_TARGET, - "Compressed PoV size: {}kb", - pov.block_data.0.len() as f64 / 1024f64, - ); - } - - let result_sender = params.collator_service.announce_with_barrier(post_hash); - Some(CollationResult { collation, result_sender: Some(result_sender) }) - } else { - None - }; - - request.complete(response); - } -} - -fn slot_now(slot_duration: SlotDuration) -> Slot { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time().timestamp(); - Slot::from_timestamp(timestamp, slot_duration) -} - -/// A claim on an Aura slot. 
-struct SlotClaim { - author_pub: Pub, - pre_digest: sp_runtime::DigestItem, - timestamp: Timestamp, -} - -async fn claim_slot( - client: &C, - parent_hash: B::Hash, - relay_parent_header: &PHeader, - slot_duration: SlotDuration, - relay_chain_slot_duration: SlotDuration, - keystore: &KeystorePtr, -) -> Result>, Box> -where - B: BlockT, - C: ProvideRuntimeApi + Send + Sync + 'static, - C::Api: AuraApi, - P: Pair, - P::Public: Encode + Decode, - P::Signature: Encode + Decode, -{ - // load authorities - let authorities = client.runtime_api().authorities(parent_hash).map_err(Box::new)?; - - // Determine the current slot and timestamp based on the relay-parent's. - let (slot_now, timestamp) = - match sc_consensus_babe::find_pre_digest::(relay_parent_header) { - Ok(babe_pre_digest) => { - let t = - Timestamp::new(relay_chain_slot_duration.as_millis() * *babe_pre_digest.slot()); - let slot = Slot::from_timestamp(t, slot_duration); - - (slot, t) - }, - Err(_) => return Ok(None), - }; - - // Try to claim the slot locally. - let author_pub = { - let res = aura_internal::claim_slot::

(slot_now, &authorities, keystore).await; - match res { - Some(p) => p, - None => return Ok(None), - } - }; - - // Produce the pre-digest. - let pre_digest = aura_internal::pre_digest::

(slot_now); - - Ok(Some(SlotClaim { author_pub, pre_digest, timestamp })) -} - -// This explicitly creates the inherent data for parachains, as well as overriding the -// timestamp based on the slot number. -async fn create_inherent_data( - relay_parent: PHash, - validation_data: &PersistedValidationData, - parent_hash: B::Hash, - para_id: ParaId, - timestamp: Timestamp, - relay_chain_interface: &impl RelayChainInterface, - create_inherent_data_providers: &impl CreateInherentDataProviders, -) -> Result<(ParachainInherentData, InherentData), Box> { - let paras_inherent_data = ParachainInherentData::create_at( - relay_parent, - relay_chain_interface, - validation_data, - para_id, - ) - .await; - - let paras_inherent_data = match paras_inherent_data { - Some(p) => p, - None => - return Err(format!("Could not create paras inherent data at {:?}", relay_parent).into()), - }; - - let mut other_inherent_data = create_inherent_data_providers - .create_inherent_data_providers(parent_hash, ()) - .map_err(|e| e as Box) - .await? - .create_inherent_data() - .await - .map_err(Box::new)?; - - other_inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, ×tamp); - - Ok((paras_inherent_data, other_inherent_data)) -} - -fn seal( - pre_sealed: B, - storage_changes: StorageChanges>, - author_pub: &P::Public, - keystore: &KeystorePtr, -) -> Result, Box> -where - P: Pair, - P::Signature: Encode + Decode + TryFrom>, - P::Public: AppPublic, -{ - let (pre_header, body) = pre_sealed.deconstruct(); - let pre_hash = pre_header.hash(); - let block_number = *pre_header.number(); - - // seal the block. 
- let block_import_params = { - let seal_digest = - aura_internal::seal::<_, P>(&pre_hash, &author_pub, keystore).map_err(Box::new)?; - let mut block_import_params = BlockImportParams::new(BlockOrigin::Own, pre_header); - block_import_params.post_digests.push(seal_digest); - block_import_params.body = Some(body.clone()); - block_import_params.state_action = - StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); - block_import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); - block_import_params - }; - let post_hash = block_import_params.post_hash(); - - tracing::info!( - target: crate::LOG_TARGET, - "🔖 Pre-sealed block for proposal at {}. Hash now {:?}, previously {:?}.", - block_number, - post_hash, - pre_hash, - ); - - Ok(block_import_params) -} - struct Verifier { client: Arc, create_inherent_data_providers: CIDP, @@ -621,6 +186,11 @@ where } } +fn slot_now(slot_duration: SlotDuration) -> Slot { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time().timestamp(); + Slot::from_timestamp(timestamp, slot_duration) +} + /// Start an import queue for a Cumulus node which checks blocks' seals and inherent data. 
/// /// Pass in only inherent data providers which don't include aura or parachain consensus inherents, From 8dca02a284c5d843c627446bfe4938ec76477043 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 30 May 2023 14:19:38 -0500 Subject: [PATCH 11/39] make interface more convenient --- client/consensus/aura/src/collator.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/client/consensus/aura/src/collator.rs b/client/consensus/aura/src/collator.rs index 1194ce694f5..eefb825c7f4 100644 --- a/client/consensus/aura/src/collator.rs +++ b/client/consensus/aura/src/collator.rs @@ -122,7 +122,7 @@ where relay_parent: PHash, validation_data: &PersistedValidationData, parent_hash: Block::Hash, - timestamp: Option, + timestamp: impl Into>, ) -> Result<(ParachainInherentData, InherentData), Box> { let paras_inherent_data = ParachainInherentData::create_at( relay_parent, @@ -149,7 +149,7 @@ where .await .map_err(Box::new)?; - if let Some(timestamp) = timestamp { + if let Some(timestamp) = timestamp.into() { other_inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, ×tamp); } @@ -168,12 +168,12 @@ where &mut self, parent_header: &Block::Header, slot_claim: &SlotClaim, - pre_digest: impl Into>>, + additional_pre_digest: impl Into>>, inherent_data: (ParachainInherentData, InherentData), proposal_duration: Duration, max_pov_size: usize, ) -> Result<(Collation, ParachainBlockData, Block::Hash), Box> { - let mut digest = pre_digest.into().unwrap_or_default(); + let mut digest = additional_pre_digest.into().unwrap_or_default(); digest.push(slot_claim.pre_digest.clone()); let proposal = self From 27f3f92951e38a93b142330d7ab2202415ada7ee Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 30 May 2023 14:20:03 -0500 Subject: [PATCH 12/39] docs and todos for lookahead --- .../consensus/aura/src/collators/lookahead.rs | 22 +++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git 
a/client/consensus/aura/src/collators/lookahead.rs b/client/consensus/aura/src/collators/lookahead.rs index 74aecf7bacd..2641e18bdfb 100644 --- a/client/consensus/aura/src/collators/lookahead.rs +++ b/client/consensus/aura/src/collators/lookahead.rs @@ -14,7 +14,22 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -// TODO [now]: docs +//! A collator for Aura that looks ahead of the most recently included parachain block +//! when determining what to build upon. +//! +//! This collator also builds additional blocks when the maximum backlog is not saturated. +//! The size of the backlog is determined by invoking a runtime API. If that runtime API +//! is not supported, this assumes a maximum backlog size of 1. +//! +//! This takes more advantage of asynchronous backing, though not complete advantage. +//! When the backlog is not saturated, this approach lets the backlog temporarily 'catch up' +//! with periods of higher throughput. When the backlog is saturated, we typically +//! fall back to the limited cadence of a single parachain block per relay-chain block. +//! +//! Despite this, the fact that there is a backlog at all allows us to spend more time +//! building the block, as there is some buffer before it can get posted to the relay-chain. +//! The main limitation is block propagation time - i.e. the new blocks created by an author +//! must be propagated to the next author before their turn. use codec::{Decode, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; @@ -118,7 +133,9 @@ pub async fn run( let relay_parent = relay_parent_header.hash(); // TODO [now]: get asynchronous backing parameters from the relay-chain - // runtime. + // runtime. why? + + // TOOD [now]: get slot from relay parent header let parent_search_params = ParentSearchParams { relay_parent, @@ -175,6 +192,7 @@ where // all validators doing this every new relay-chain block. 
// Actually, as long as sessions are based on slot number then they should // be the same for all... + // That is, blocks with the same relay-parent should have the same session. // // TODO [now]: new runtime API, // AuraUnincludedSegmentApi::has_space(slot) or something like it. From 11b8062acbfceb850191390384acb482319eaae2 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 30 May 2023 14:20:14 -0500 Subject: [PATCH 13/39] refactor basic collator to use new collator utility --- client/consensus/aura/src/collators/basic.rs | 252 ++++--------------- 1 file changed, 44 insertions(+), 208 deletions(-) diff --git a/client/consensus/aura/src/collators/basic.rs b/client/consensus/aura/src/collators/basic.rs index c6f91af637e..837c7e5bb28 100644 --- a/client/consensus/aura/src/collators/basic.rs +++ b/client/consensus/aura/src/collators/basic.rs @@ -14,7 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -//! This provides the option to run a basic relay-chain driven Aura implementation +//! This provides the option to run a basic relay-chain driven Aura implementation. +//! +//! This collator only builds on top of the most recently included block, limiting the +//! block time to a maximum of two times the relay-chain block time, and requiring the +//! block to be built and distributed to validators between two relay-chain blocks. //! //! For more information about AuRa, the Substrate crate should be checked. @@ -59,6 +63,8 @@ use sp_state_machine::StorageChanges; use sp_timestamp::Timestamp; use std::{convert::TryFrom, error::Error, fmt::Debug, hash::Hash, sync::Arc, time::Duration}; +use crate::collator as collator_util; + /// Parameters for [`run`]. 
pub struct Params { pub create_inherent_data_providers: CIDP, @@ -110,6 +116,20 @@ pub async fn run( ) .await; + let mut collator = { + let params = collator_util::Params { + create_inherent_data_providers: params.create_inherent_data_providers, + block_import: params.block_import, + relay_client: params.relay_client.clone(), + keystore: params.keystore.clone(), + para_id: params.para_id, + proposer: params.proposer, + collator_service: params.collator_service, + }; + + collator_util::Collator::::new(params) + }; + while let Some(request) = collation_requests.next().await { macro_rules! reject_with_error { ($err:expr) => {{ @@ -135,7 +155,7 @@ pub async fn run( let parent_hash = parent_header.hash(); - if !params.collator_service.check_block_status(parent_hash, &parent_header) { + if !collator.collator_service().check_block_status(parent_hash, &parent_header) { continue } @@ -145,7 +165,7 @@ pub async fn run( Ok(Some(h)) => h, }; - let claim = match claim_slot::<_, _, P>( + let claim = match collator_util::claim_slot::<_, _, P>( &*params.para_client, parent_hash, &relay_parent_header, @@ -161,217 +181,33 @@ pub async fn run( }; let (parachain_inherent_data, other_inherent_data) = try_request!( - create_inherent_data( + collator.create_inherent_data( *request.relay_parent(), &validation_data, parent_hash, - params.para_id, - claim.timestamp, - ¶ms.relay_client, - ¶ms.create_inherent_data_providers, - ) - .await + claim.timestamp(), + ).await ); - let proposal = try_request!( - params - .proposer - .propose( - &parent_header, - ¶chain_inherent_data, - other_inherent_data, - Digest { logs: vec![claim.pre_digest] }, - // TODO [https://github.com/paritytech/cumulus/issues/2439] - // We should call out to a pluggable interface that provides - // the proposal duration. - Duration::from_millis(500), - // Set the block limit to 50% of the maximum PoV size. 
- // - // TODO: If we got benchmarking that includes the proof size, - // we should be able to use the maximum pov size. - Some((validation_data.max_pov_size / 2) as usize), - ) - .await + let (collation, _, post_hash) = try_request!( + collator.collate( + &parent_header, + &claim, + None, + (parachain_inherent_data, other_inherent_data), + // TODO [https://github.com/paritytech/cumulus/issues/2439] + // We should call out to a pluggable interface that provides + // the proposal duration. + Duration::from_millis(500), + // Set the block limit to 50% of the maximum PoV size. + // + // TODO: If we got benchmarking that includes the proof size, + // we should be able to use the maximum pov size. + (validation_data.max_pov_size / 2) as usize, + ).await ); - let sealed_importable = try_request!(seal::<_, _, P>( - proposal.block, - proposal.storage_changes, - &claim.author_pub, - ¶ms.keystore, - )); - - let post_hash = sealed_importable.post_hash(); - let block = Block::new( - sealed_importable.post_header(), - sealed_importable - .body - .as_ref() - .expect("body always created with this `propose` fn; qed") - .clone(), - ); - - try_request!(params.block_import.import_block(sealed_importable).await); - - let response = if let Some((collation, b)) = params.collator_service.build_collation( - &parent_header, - post_hash, - ParachainCandidate { block, proof: proposal.proof }, - ) { - tracing::info!( - target: crate::LOG_TARGET, - "PoV size {{ header: {}kb, extrinsics: {}kb, storage_proof: {}kb }}", - b.header().encode().len() as f64 / 1024f64, - b.extrinsics().encode().len() as f64 / 1024f64, - b.storage_proof().encode().len() as f64 / 1024f64, - ); - - if let MaybeCompressedPoV::Compressed(ref pov) = collation.proof_of_validity { - tracing::info!( - target: crate::LOG_TARGET, - "Compressed PoV size: {}kb", - pov.block_data.0.len() as f64 / 1024f64, - ); - } - - let result_sender = params.collator_service.announce_with_barrier(post_hash); - Some(CollationResult { collation, 
result_sender: Some(result_sender) }) - } else { - None - }; - - request.complete(response); + let result_sender = Some(collator.collator_service().announce_with_barrier(post_hash)); + request.complete(Some(CollationResult { collation, result_sender })); } } - -/// A claim on an Aura slot. -struct SlotClaim { - author_pub: Pub, - pre_digest: sp_runtime::DigestItem, - timestamp: Timestamp, -} - -async fn claim_slot( - client: &C, - parent_hash: B::Hash, - relay_parent_header: &PHeader, - slot_duration: SlotDuration, - relay_chain_slot_duration: SlotDuration, - keystore: &KeystorePtr, -) -> Result>, Box> -where - B: BlockT, - C: ProvideRuntimeApi + Send + Sync + 'static, - C::Api: AuraApi, - P: Pair, - P::Public: Encode + Decode, - P::Signature: Encode + Decode, -{ - // load authorities - let authorities = client.runtime_api().authorities(parent_hash).map_err(Box::new)?; - - // Determine the current slot and timestamp based on the relay-parent's. - let (slot_now, timestamp) = - match sc_consensus_babe::find_pre_digest::(relay_parent_header) { - Ok(babe_pre_digest) => { - let t = - Timestamp::new(relay_chain_slot_duration.as_millis() * *babe_pre_digest.slot()); - let slot = Slot::from_timestamp(t, slot_duration); - - (slot, t) - }, - Err(_) => return Ok(None), - }; - - // Try to claim the slot locally. - let author_pub = { - let res = aura_internal::claim_slot::

(slot_now, &authorities, keystore).await; - match res { - Some(p) => p, - None => return Ok(None), - } - }; - - // Produce the pre-digest. - let pre_digest = aura_internal::pre_digest::

(slot_now); - - Ok(Some(SlotClaim { author_pub, pre_digest, timestamp })) -} - -// This explicitly creates the inherent data for parachains, as well as overriding the -// timestamp based on the slot number. -async fn create_inherent_data( - relay_parent: PHash, - validation_data: &PersistedValidationData, - parent_hash: B::Hash, - para_id: ParaId, - timestamp: Timestamp, - relay_chain_interface: &impl RelayChainInterface, - create_inherent_data_providers: &impl CreateInherentDataProviders, -) -> Result<(ParachainInherentData, InherentData), Box> { - let paras_inherent_data = ParachainInherentData::create_at( - relay_parent, - relay_chain_interface, - validation_data, - para_id, - ) - .await; - - let paras_inherent_data = match paras_inherent_data { - Some(p) => p, - None => - return Err(format!("Could not create paras inherent data at {:?}", relay_parent).into()), - }; - - let mut other_inherent_data = create_inherent_data_providers - .create_inherent_data_providers(parent_hash, ()) - .map_err(|e| e as Box) - .await? - .create_inherent_data() - .await - .map_err(Box::new)?; - - other_inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, ×tamp); - - Ok((paras_inherent_data, other_inherent_data)) -} - -fn seal( - pre_sealed: B, - storage_changes: StorageChanges>, - author_pub: &P::Public, - keystore: &KeystorePtr, -) -> Result, Box> -where - P: Pair, - P::Signature: Encode + Decode + TryFrom>, - P::Public: AppPublic, -{ - let (pre_header, body) = pre_sealed.deconstruct(); - let pre_hash = pre_header.hash(); - let block_number = *pre_header.number(); - - // seal the block. 
- let block_import_params = { - let seal_digest = - aura_internal::seal::<_, P>(&pre_hash, &author_pub, keystore).map_err(Box::new)?; - let mut block_import_params = BlockImportParams::new(BlockOrigin::Own, pre_header); - block_import_params.post_digests.push(seal_digest); - block_import_params.body = Some(body.clone()); - block_import_params.state_action = - StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); - block_import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); - block_import_params - }; - let post_hash = block_import_params.post_hash(); - - tracing::info!( - target: crate::LOG_TARGET, - "🔖 Pre-sealed block for proposal at {}. Hash now {:?}, previously {:?}.", - block_number, - post_hash, - pre_hash, - ); - - Ok(block_import_params) -} From 95a6ff52561286e57519355da8d77b7f5024999b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 31 May 2023 18:24:27 -0400 Subject: [PATCH 14/39] some more refactoring --- Cargo.lock | 3 ++ client/consensus/aura/src/collator.rs | 41 +++++++++++-------- .../consensus/aura/src/collators/lookahead.rs | 18 +++++++- client/consensus/common/Cargo.toml | 3 ++ client/consensus/common/src/lib.rs | 17 +++++++- 5 files changed, 63 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 271fac694c7..6a9a277746d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2269,11 +2269,14 @@ dependencies = [ "polkadot-primitives", "sc-client-api", "sc-consensus", + "sc-consensus-babe", "schnellru", "sp-blockchain", "sp-consensus", + "sp-consensus-slots", "sp-core", "sp-runtime", + "sp-timestamp", "sp-tracing", "sp-trie", "substrate-prometheus-endpoint", diff --git a/client/consensus/aura/src/collator.rs b/client/consensus/aura/src/collator.rs index eefb825c7f4..27ded942634 100644 --- a/client/consensus/aura/src/collator.rs +++ b/client/consensus/aura/src/collator.rs @@ -26,7 +26,7 @@ use codec::{Decode, Encode}; use cumulus_client_collator::service::ServiceInterface as 
CollatorServiceInterface; -use cumulus_client_consensus_common::{ParachainBlockImportMarker, ParachainCandidate}; +use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker, ParachainCandidate}; use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_core::{ relay_chain::Hash as PHash, DigestItem, ParachainBlockData, PersistedValidationData, @@ -249,6 +249,22 @@ pub struct SlotClaim { } impl SlotClaim { + /// Create a slot-claim from the given author public key, slot, and timestamp. + /// + /// This does not check whether the author actually owns the slot or the timestamp + /// falls within the slot. + pub fn unchecked

(author_pub: Pub, slot: Slot, timestamp: Timestamp) -> Self where + P: Pair, + P::Public: Encode + Decode, + P::Signature: Encode + Decode + { + SlotClaim { + author_pub, + timestamp, + pre_digest: aura_internal::pre_digest::

(slot), + } + } + /// Get the author's public key. pub fn author_pub(&self) -> &Pub { &self.author_pub @@ -287,17 +303,13 @@ where let authorities = client.runtime_api().authorities(parent_hash).map_err(Box::new)?; // Determine the current slot and timestamp based on the relay-parent's. - let (slot_now, timestamp) = - match sc_consensus_babe::find_pre_digest::(relay_parent_header) { - Ok(babe_pre_digest) => { - let t = - Timestamp::new(relay_chain_slot_duration.as_millis() * *babe_pre_digest.slot()); - let slot = Slot::from_timestamp(t, slot_duration); - - (slot, t) - }, - Err(_) => return Ok(None), - }; + let (slot_now, timestamp) = match consensus_common::relay_slot_and_timestamp( + relay_parent_header, + relay_chain_slot_duration, + ) { + Some((_, t)) => (Slot::from_timestamp(t, slot_duration), t), + None => return Ok(None), + }; // Try to claim the slot locally. let author_pub = { @@ -308,10 +320,7 @@ where } }; - // Produce the pre-digest. - let pre_digest = aura_internal::pre_digest::

(slot_now); - - Ok(Some(SlotClaim { author_pub, pre_digest, timestamp })) + Ok(Some(SlotClaim::unchecked::

(author_pub, slot_now, timestamp))) } /// Seal a block with a signature in the header. diff --git a/client/consensus/aura/src/collators/lookahead.rs b/client/consensus/aura/src/collators/lookahead.rs index 2641e18bdfb..bd676e6d4e2 100644 --- a/client/consensus/aura/src/collators/lookahead.rs +++ b/client/consensus/aura/src/collators/lookahead.rs @@ -72,6 +72,8 @@ use sp_state_machine::StorageChanges; use sp_timestamp::Timestamp; use std::{convert::TryFrom, error::Error, fmt::Debug, hash::Hash, sync::Arc, time::Duration}; +use crate::collator as collator_util; + /// Parameters for [`run`]. pub struct Params { pub create_inherent_data_providers: CIDP, @@ -129,14 +131,26 @@ pub async fn run( }, }; + let mut collator = { + let params = collator_util::Params { + create_inherent_data_providers: params.create_inherent_data_providers, + block_import: params.block_import, + relay_client: params.relay_client.clone(), + keystore: params.keystore.clone(), + para_id: params.para_id, + proposer: params.proposer, + collator_service: params.collator_service, + }; + + collator_util::Collator::::new(params) + }; + while let Some(relay_parent_header) = import_notifications.next().await { let relay_parent = relay_parent_header.hash(); // TODO [now]: get asynchronous backing parameters from the relay-chain // runtime. why? 
- // TOOD [now]: get slot from relay parent header - let parent_search_params = ParentSearchParams { relay_parent, para_id: params.para_id, diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index 02b36320062..5dd6e9f6214 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -16,10 +16,13 @@ tracing = "0.1.37" # Substrate sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/client/consensus/common/src/lib.rs b/client/consensus/common/src/lib.rs index 1284802e4b5..bac7f375216 100644 --- a/client/consensus/common/src/lib.rs +++ b/client/consensus/common/src/lib.rs @@ -15,13 +15,15 @@ // along with Cumulus. If not, see . 
use codec::Decode; -use polkadot_primitives::{Hash as PHash, PersistedValidationData}; +use polkadot_primitives::{Block as PBlock, Hash as PHash, Header as PHeader, PersistedValidationData}; use cumulus_primitives_core::{relay_chain::OccupiedCoreAssumption, ParaId}; use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface}; use sc_client_api::Backend; use sc_consensus::{shared_data::SharedData, BlockImport, ImportResult}; +use sp_consensus_slots::{Slot, SlotDuration}; +use sp_timestamp::Timestamp; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use std::sync::Arc; @@ -350,3 +352,16 @@ pub async fn find_potential_parents( Ok(potential_parents) } + +/// Get the relay-parent slot and timestamp from a header. +pub fn relay_slot_and_timestamp( + relay_parent_header: &PHeader, + relay_chain_slot_duration: SlotDuration, +) -> Option<(Slot, Timestamp)> { + sc_consensus_babe::find_pre_digest::(relay_parent_header).map(|babe_pre_digest| { + let slot = babe_pre_digest.slot(); + let t = Timestamp::new(relay_chain_slot_duration.as_millis() * *slot); + + (slot, t) + }).ok() +} From 6408906c160459fa4c34e0073fc3ef7fbfa7f2fd Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 6 Jun 2023 14:16:58 -0400 Subject: [PATCH 15/39] finish most of the control flow for new aura --- .../consensus/aura/src/collators/lookahead.rs | 148 +++++++++++++++--- 1 file changed, 128 insertions(+), 20 deletions(-) diff --git a/client/consensus/aura/src/collators/lookahead.rs b/client/consensus/aura/src/collators/lookahead.rs index bd676e6d4e2..97577e32a5b 100644 --- a/client/consensus/aura/src/collators/lookahead.rs +++ b/client/consensus/aura/src/collators/lookahead.rs @@ -34,6 +34,7 @@ use codec::{Decode, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{ + self as consensus_common, ParachainBlockImportMarker, ParachainCandidate, ParentSearchParams, }; use 
cumulus_client_consensus_proposer::ProposerInterface; @@ -89,6 +90,7 @@ pub struct Params { pub relay_chain_slot_duration: SlotDuration, pub proposer: Proposer, pub collator_service: CS, + pub authoring_duration: Duration, } /// Run async-backing-friendly Aura. @@ -149,7 +151,28 @@ pub async fn run( let relay_parent = relay_parent_header.hash(); // TODO [now]: get asynchronous backing parameters from the relay-chain - // runtime. why? + // runtime. why? for the parent search parameters. + + let max_pov_size = match params.relay_client.persisted_validation_data( + relay_parent, + params.para_id, + OccupiedCoreAssumption::Included, + ).await { + Ok(None) => continue, + Ok(Some(pvd)) => pvd.max_pov_size, + Err(err) => { + tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to gather information from relay-client"); + continue; + } + }; + + let (slot_now, timestamp) = match consensus_common::relay_slot_and_timestamp( + &relay_parent_header, + params.relay_chain_slot_duration, + ) { + None => continue, + Some((s, t)) => (Slot::from_timestamp(t, params.slot_duration), t), + }; let parent_search_params = ParentSearchParams { relay_parent, @@ -183,32 +206,117 @@ pub async fn run( Ok(x) => x, }; - // Sort by depth, descending, to choose the longest chain, and lazily filter - // by those with space. - potential_parents.sort_by(|a, b| b.depth.cmp(&a.depth)); - let potential_parents = potential_parents - .into_iter() - .filter(|p| can_build_upon(p.hash, &*params.para_client)); - - if let Some(parent) = potential_parents.next() { - // TODO [now]: build and announce collations recursively until - // `can_build_upon` fails. - unimplemented!() + let included_block = match potential_parents.iter().find(|x| x.depth == 0) { + None => continue, // also serves as an `is_empty` check. 
+ Some(b) => b.hash, + }; + + let para_client = &*params.para_client; + let keystore = ¶ms.keystore; + let can_build_upon = |block_hash| can_build_upon( + slot_now, + timestamp, + block_hash, + included_block, + ¶_client, + &keystore, + ); + + // Sort by depth, ascending, to choose the longest chain. + // + // If the longest chain has space, build upon that. Otherwise, don't + // build at all. + potential_parents.sort_by_key(|a| &a.depth); + let initial_parent = match potential_parents.pop() { + None => continue, + Some(p) => p, + }; + + // Build in a loop until not allowed. Note that the authorities can change + // at any block, so we need to re-claim our slot every time. + let mut parent_hash = initial_parent.hash; + let mut parent_header = initial_parent.header; + loop { + let slot_claim = match can_build_upon(parent_hash).await { + None => break, + Some(c) => c, + }; + + let persisted_validation_data = PersistedValidationData { + parent_head: parent_header.encode(), + relay_parent_number: *relay_parent_header.number(), + relay_parent_storage_root: *relay_parent_header.state_root(), + max_pov_size, + }; + + // Build and announce collations recursively until + // `can_build_upon` fails or building a collation fails. + let (parachain_inherent_data, other_inherent_data) = match collator.create_inherent_data( + relay_parent, + &persisted_validation_data, + parent_hash, + slot_claim.timestamp(), + ).await { + Err(err) => { + tracing::error!(target: crate::LOG_TARGET, ?err); + break; + }, + Ok(x) => x, + }; + + let (new_block_hash, new_block_header) = match collator.collate( + &parent_header, + &slot_claim, + None, + (parachain_inherent_data, other_inherent_data), + params.authoring_duration, + // Set the block limit to 50% of the maximum PoV size. + // + // TODO: If we got benchmarking that includes the proof size, + // we should be able to use the maximum pov size. 
+ (validation_data.max_pov_size / 2) as usize, + ).await { + Ok((collation, block_data, new_block_hash)) => { + parent_hash = new_block_hash; + parent_header = block_data.header; + + // TODO [now]: announce to parachain sub-network + + // TODO [link to github issue when i have internet]: + // announce collation to relay-chain validators. + } + Err(err) => { + tracing::error!(target: crate::LOG_TARGET, ?err); + break; + } + }; } } } -fn can_build_upon(block_hash: Block::Hash, client: &Client) -> bool +// Checks if we own the slot at the given block and whether there +// is space in the unincluded segment. +async fn can_build_upon( + slot: Slot, + timestamp: Timestamp, + block_hash: Block::Hash, + included_block: Block::Hash, + client: &Client, + keystore: &KeystorePtr, +) -> Option> where Client: ProvideRuntimeApi, + Client::Api: AuraApi, + P: Pair, + P::Public: Encode + Decode, + P::Signature: Encode + Decode, { - // TODO [now]: claim slot, maybe with an authorities cache to avoid - // all validators doing this every new relay-chain block. - // Actually, as long as sessions are based on slot number then they should - // be the same for all... - // That is, blocks with the same relay-parent should have the same session. - // + let authorities = client.runtime_api().authorities(block_hash).ok()?; + let author_pub = aura_internal::claim_slot::

(slot, &authorities, keystore).await?; + // TODO [now]: new runtime API, - // AuraUnincludedSegmentApi::has_space(slot) or something like it. + // AuraUnincludedSegmentApi::has_space(included_block, slot) or something like it. unimplemented!() + + Some(SlotClaim::unchecked(author_pub, slot, timestamp)) } From 14d50cf96dd75568d530375511b36ebba01c34f3 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Jun 2023 13:28:50 -0500 Subject: [PATCH 16/39] introduce backend as parameter --- client/consensus/aura/src/collators/lookahead.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/client/consensus/aura/src/collators/lookahead.rs b/client/consensus/aura/src/collators/lookahead.rs index 97577e32a5b..86aa28ada1d 100644 --- a/client/consensus/aura/src/collators/lookahead.rs +++ b/client/consensus/aura/src/collators/lookahead.rs @@ -76,10 +76,11 @@ use std::{convert::TryFrom, error::Error, fmt::Debug, hash::Hash, sync::Arc, tim use crate::collator as collator_util; /// Parameters for [`run`]. -pub struct Params { +pub struct Params { pub create_inherent_data_providers: CIDP, pub block_import: BI, pub para_client: Arc, + pub para_backend: Arc, pub relay_client: Arc, pub sync_oracle: SO, pub keystore: KeystorePtr, @@ -94,8 +95,8 @@ pub struct Params { } /// Run async-backing-friendly Aura. -pub async fn run( - params: Params, +pub async fn run( + params: Params, ) where Block: BlockT, Client: ProvideRuntimeApi @@ -107,6 +108,7 @@ pub async fn run( + Sync + 'static, Client::Api: AuraApi + CollectCollationInfo, + Backend: sp_blockchain::Backend, RClient: RelayChainInterface, CIDP: CreateInherentDataProviders + 'static, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, @@ -182,12 +184,9 @@ pub async fn run( ignore_alternative_branches: true, }; - // TODO [now]: remove this in favor of one passed in as a parameter. 
- let fake_hack: sc_client_api::in_mem::Blockchain = unimplemented!(); - let potential_parents = cumulus_client_consensus_common::find_potential_parents::( parent_search_params, - &fake_hack, // sp_blockchain::Backend + ¶ms.para_backend, ¶ms.relay_client, ) .await; From 706aaf5e96272f93b821aa23c0246f417c183d7e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Jun 2023 13:33:37 -0500 Subject: [PATCH 17/39] fix compilation --- .../consensus/aura/src/collators/lookahead.rs | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/client/consensus/aura/src/collators/lookahead.rs b/client/consensus/aura/src/collators/lookahead.rs index 86aa28ada1d..5e9b0624e6a 100644 --- a/client/consensus/aura/src/collators/lookahead.rs +++ b/client/consensus/aura/src/collators/lookahead.rs @@ -46,7 +46,7 @@ use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_node_primitives::{CollationResult, MaybeCompressedPoV}; use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{Block as PBlock, CollatorPair, Header as PHeader, Id as ParaId}; +use polkadot_primitives::{Block as PBlock, CollatorPair, Header as PHeader, Id as ParaId, OccupiedCoreAssumption}; use futures::prelude::*; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; @@ -73,7 +73,7 @@ use sp_state_machine::StorageChanges; use sp_timestamp::Timestamp; use std::{convert::TryFrom, error::Error, fmt::Debug, hash::Hash, sync::Arc, time::Duration}; -use crate::collator as collator_util; +use crate::collator::{self as collator_util, SlotClaim}; /// Parameters for [`run`]. 
pub struct Params { @@ -186,7 +186,7 @@ pub async fn run let potential_parents = cumulus_client_consensus_common::find_potential_parents::( parent_search_params, - ¶ms.para_backend, + &*params.para_backend, ¶ms.relay_client, ) .await; @@ -212,12 +212,12 @@ pub async fn run let para_client = &*params.para_client; let keystore = ¶ms.keystore; - let can_build_upon = |block_hash| can_build_upon( + let can_build_upon = |block_hash| can_build_upon::<_, _, P>( slot_now, timestamp, block_hash, included_block, - ¶_client, + para_client, &keystore, ); @@ -225,7 +225,7 @@ pub async fn run // // If the longest chain has space, build upon that. Otherwise, don't // build at all. - potential_parents.sort_by_key(|a| &a.depth); + potential_parents.sort_by_key(|a| a.depth); let initial_parent = match potential_parents.pop() { None => continue, Some(p) => p, @@ -241,8 +241,8 @@ pub async fn run Some(c) => c, }; - let persisted_validation_data = PersistedValidationData { - parent_head: parent_header.encode(), + let validation_data = PersistedValidationData { + parent_head: parent_header.encode().into(), relay_parent_number: *relay_parent_header.number(), relay_parent_storage_root: *relay_parent_header.state_root(), max_pov_size, @@ -252,7 +252,7 @@ pub async fn run // `can_build_upon` fails or building a collation fails. 
let (parachain_inherent_data, other_inherent_data) = match collator.create_inherent_data( relay_parent, - &persisted_validation_data, + &validation_data, parent_hash, slot_claim.timestamp(), ).await { @@ -263,7 +263,7 @@ pub async fn run Ok(x) => x, }; - let (new_block_hash, new_block_header) = match collator.collate( + match collator.collate( &parent_header, &slot_claim, None, @@ -277,7 +277,7 @@ pub async fn run ).await { Ok((collation, block_data, new_block_hash)) => { parent_hash = new_block_hash; - parent_header = block_data.header; + parent_header = block_data.into_header(); // TODO [now]: announce to parachain sub-network @@ -288,7 +288,7 @@ pub async fn run tracing::error!(target: crate::LOG_TARGET, ?err); break; } - }; + } } } } @@ -315,7 +315,7 @@ where // TODO [now]: new runtime API, // AuraUnincludedSegmentApi::has_space(included_block, slot) or something like it. - unimplemented!() + unimplemented!(); - Some(SlotClaim::unchecked(author_pub, slot, timestamp)) + Some(SlotClaim::unchecked::

(author_pub, slot, timestamp)) } From 28cdd3aa9ae12381f030f2460e14175bd1300fb5 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Jun 2023 13:55:39 -0500 Subject: [PATCH 18/39] fix a couple more TODOs --- .../consensus/aura/src/collators/lookahead.rs | 28 +++++++++++++++++-- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/client/consensus/aura/src/collators/lookahead.rs b/client/consensus/aura/src/collators/lookahead.rs index 5e9b0624e6a..66db432807d 100644 --- a/client/consensus/aura/src/collators/lookahead.rs +++ b/client/consensus/aura/src/collators/lookahead.rs @@ -120,6 +120,14 @@ pub async fn run P::Public: AppPublic + Hash + Member + Encode + Decode, P::Signature: TryFrom> + Hash + Member + Encode + Decode, { + // This is an arbitrary value which is likely guaranteed to exceed any reasonable + // limit, as it would correspond to 10 non-included blocks. + // + // Since we only search for parent blocks which have already been included, + // we can guarantee that all imported blocks respect the unincluded segment + // rules specified by the parachain's runtime and thus will never be too deep. + const PARENT_SEARCH_DEPTH: usize = 10; + let mut params = params; let mut import_notifications = match params.relay_client.import_notification_stream().await { @@ -179,8 +187,8 @@ pub async fn run let parent_search_params = ParentSearchParams { relay_parent, para_id: params.para_id, - ancestry_lookback: unimplemented!(), - max_depth: unimplemented!(), // max unincluded segment len + ancestry_lookback: max_ancestry_lookback(relay_parent, ¶ms.relay_client).await, + max_depth: PARENT_SEARCH_DEPTH, ignore_alternative_branches: true, }; @@ -281,7 +289,7 @@ pub async fn run // TODO [now]: announce to parachain sub-network - // TODO [link to github issue when i have internet]: + // TODO [https://github.com/paritytech/polkadot/issues/5056]: // announce collation to relay-chain validators. 
} Err(err) => { @@ -319,3 +327,17 @@ where Some(SlotClaim::unchecked::

(author_pub, slot, timestamp)) } + +async fn max_ancestry_lookback( + _relay_parent: PHash, + _relay_client: &impl RelayChainInterface, +) -> usize { + // TODO [https://github.com/paritytech/polkadot/pull/5022] + // We need to read the relay-chain state to know what the maximum + // age truly is, but that depends on those pallets existing. + // + // For now, just provide the conservative value of '2'. + // Overestimating can cause problems, as we'd be building on forks of the + // chain that never get included. Underestimating is less of an issue. + 2 +} From 1a43f1892e14df464c99cc2bac97ee132cd9dd45 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Jun 2023 14:02:50 -0500 Subject: [PATCH 19/39] add an `announce_block` function to collator service --- client/collator/src/service.rs | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/client/collator/src/service.rs b/client/collator/src/service.rs index 7724b0a68a6..fb62e88801a 100644 --- a/client/collator/src/service.rs +++ b/client/collator/src/service.rs @@ -58,12 +58,22 @@ pub trait ServiceInterface { candidate: ParachainCandidate, ) -> Option<(Collation, ParachainBlockData)>; - /// Inform networking systems that the block should be announced after an appropriate - /// signal has been received. This returns the sending half of the signal. + /// Inform networking systems that the block should be announced after a signal has + /// been received to indicate the block has been seconded by a relay-chain validator. + /// + /// This sets up the barrier and returns the sending side of a channel, for the signal + /// to be passed through. fn announce_with_barrier( &self, block_hash: Block::Hash, ) -> oneshot::Sender; + + /// Directly announce a block on the network. + fn announce_block( + &self, + block_hash: Block::Hash, + data: Option>, + ); } /// The [`CollatorService`] provides common utilities for parachain consensus and authoring. 
@@ -74,6 +84,7 @@ pub trait ServiceInterface { pub struct CollatorService { block_status: Arc, wait_to_announce: Arc>>, + announce_block: Arc>) + Send + Sync>, runtime_api: Arc, } @@ -82,6 +93,7 @@ impl Clone for CollatorService { Self { block_status: self.block_status.clone(), wait_to_announce: self.wait_to_announce.clone(), + announce_block: self.announce_block.clone(), runtime_api: self.runtime_api.clone(), } } @@ -101,9 +113,9 @@ where announce_block: Arc>) + Send + Sync>, runtime_api: Arc, ) -> Self { - let wait_to_announce = Arc::new(Mutex::new(WaitToAnnounce::new(spawner, announce_block))); + let wait_to_announce = Arc::new(Mutex::new(WaitToAnnounce::new(spawner, announce_block.clone()))); - Self { block_status, wait_to_announce, runtime_api } + Self { block_status, wait_to_announce, announce_block, runtime_api } } /// Checks the status of the given block hash in the Parachain. @@ -315,4 +327,12 @@ where ) -> oneshot::Sender { CollatorService::announce_with_barrier(self, block_hash) } + + fn announce_block( + &self, + block_hash: Block::Hash, + data: Option>, + ) { + (self.announce_block)(block_hash, data) + } } From ae0708864c880fc5ff1d452446f6c38a000c8825 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Jun 2023 14:12:30 -0500 Subject: [PATCH 20/39] announce with barrier --- client/consensus/aura/src/collators/lookahead.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/client/consensus/aura/src/collators/lookahead.rs b/client/consensus/aura/src/collators/lookahead.rs index 66db432807d..70df7462fc5 100644 --- a/client/consensus/aura/src/collators/lookahead.rs +++ b/client/consensus/aura/src/collators/lookahead.rs @@ -287,7 +287,9 @@ pub async fn run parent_hash = new_block_hash; parent_header = block_data.into_header(); - // TODO [now]: announce to parachain sub-network + // TODO [now]: we should be able to directly announce, as long as + // we have full nodes do some equivocation checks locally. 
+ let _sender = params.collator_service.announce_with_barrier(new_block_hash); // TODO [https://github.com/paritytech/polkadot/issues/5056]: // announce collation to relay-chain validators. From 3397395a29b956614c718cafba1d2e684ac48035 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Jun 2023 14:18:34 -0500 Subject: [PATCH 21/39] rename block announcement validator to be more specific --- client/network/src/lib.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 0c15ab3add5..9f613b81b3b 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -186,8 +186,16 @@ impl TryFrom<&'_ CollationSecondedSignal> for BlockAnnounceData { } } +/// A type alias for the [`RequireSecondedInBlockAnnounce`] validator. +#[deprecated = "This has been renamed to RequireSecondedInBlockAnnounce"] +pub type BlockAnnounceValidator = RequireSecondedInBlockAnnounce; + /// Parachain specific block announce validator. /// +/// This is not required when the collation mechanism itself is sybil-resistant, as it is a spam protection +/// mechanism used to prevent nodes from dealing with unbounded numbers of blocks. For sybil-resistant +/// collation mechanisms, this will only slow things down. +/// /// This block announce validator is required if the parachain is running /// with the relay chain provided consensus to make sure each node only /// imports a reasonable number of blocks per round. The relay chain provided @@ -214,23 +222,23 @@ impl TryFrom<&'_ CollationSecondedSignal> for BlockAnnounceData { /// it. However, if the announcement is for a block below the tip the announcement is accepted /// as it probably comes from a node that is currently syncing the chain. 
#[derive(Clone)] -pub struct BlockAnnounceValidator { +pub struct RequireSecondedInBlockAnnounce { phantom: PhantomData, relay_chain_interface: RCInterface, para_id: ParaId, } -impl BlockAnnounceValidator +impl RequireSecondedInBlockAnnounce where RCInterface: Clone, { - /// Create a new [`BlockAnnounceValidator`]. + /// Create a new [`RequireSecondedInBlockAnnounce`]. pub fn new(relay_chain_interface: RCInterface, para_id: ParaId) -> Self { Self { phantom: Default::default(), relay_chain_interface, para_id } } } -impl BlockAnnounceValidator +impl RequireSecondedInBlockAnnounce where RCInterface: RelayChainInterface + Clone, { @@ -315,7 +323,7 @@ where } impl BlockAnnounceValidatorT - for BlockAnnounceValidator + for RequireSecondedInBlockAnnounce where RCInterface: RelayChainInterface + Clone + 'static, { From c6f272d9aa6399b660ca8376dd56e8c7f3f0d0bc Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Jun 2023 14:18:57 -0500 Subject: [PATCH 22/39] fmt --- client/collator/src/service.rs | 15 +-- client/consensus/aura/src/collator.rs | 15 ++- client/consensus/aura/src/collators/basic.rs | 46 ++++----- .../consensus/aura/src/collators/lookahead.rs | 93 +++++++++++-------- client/consensus/common/src/lib.rs | 20 ++-- client/network/src/lib.rs | 24 ++--- 6 files changed, 113 insertions(+), 100 deletions(-) diff --git a/client/collator/src/service.rs b/client/collator/src/service.rs index fb62e88801a..3125fea4248 100644 --- a/client/collator/src/service.rs +++ b/client/collator/src/service.rs @@ -69,11 +69,7 @@ pub trait ServiceInterface { ) -> oneshot::Sender; /// Directly announce a block on the network. - fn announce_block( - &self, - block_hash: Block::Hash, - data: Option>, - ); + fn announce_block(&self, block_hash: Block::Hash, data: Option>); } /// The [`CollatorService`] provides common utilities for parachain consensus and authoring. 
@@ -113,7 +109,8 @@ where announce_block: Arc>) + Send + Sync>, runtime_api: Arc, ) -> Self { - let wait_to_announce = Arc::new(Mutex::new(WaitToAnnounce::new(spawner, announce_block.clone()))); + let wait_to_announce = + Arc::new(Mutex::new(WaitToAnnounce::new(spawner, announce_block.clone()))); Self { block_status, wait_to_announce, announce_block, runtime_api } } @@ -328,11 +325,7 @@ where CollatorService::announce_with_barrier(self, block_hash) } - fn announce_block( - &self, - block_hash: Block::Hash, - data: Option>, - ) { + fn announce_block(&self, block_hash: Block::Hash, data: Option>) { (self.announce_block)(block_hash, data) } } diff --git a/client/consensus/aura/src/collator.rs b/client/consensus/aura/src/collator.rs index 27ded942634..bc9f616ca13 100644 --- a/client/consensus/aura/src/collator.rs +++ b/client/consensus/aura/src/collator.rs @@ -26,7 +26,9 @@ use codec::{Decode, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; -use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker, ParachainCandidate}; +use cumulus_client_consensus_common::{ + self as consensus_common, ParachainBlockImportMarker, ParachainCandidate, +}; use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_core::{ relay_chain::Hash as PHash, DigestItem, ParachainBlockData, PersistedValidationData, @@ -253,16 +255,13 @@ impl SlotClaim { /// /// This does not check whether the author actually owns the slot or the timestamp /// falls within the slot. - pub fn unchecked

(author_pub: Pub, slot: Slot, timestamp: Timestamp) -> Self where + pub fn unchecked

(author_pub: Pub, slot: Slot, timestamp: Timestamp) -> Self + where P: Pair, P::Public: Encode + Decode, - P::Signature: Encode + Decode + P::Signature: Encode + Decode, { - SlotClaim { - author_pub, - timestamp, - pre_digest: aura_internal::pre_digest::

(slot), - } + SlotClaim { author_pub, timestamp, pre_digest: aura_internal::pre_digest::

(slot) } } /// Get the author's public key. diff --git a/client/consensus/aura/src/collators/basic.rs b/client/consensus/aura/src/collators/basic.rs index 837c7e5bb28..235a76de1cf 100644 --- a/client/consensus/aura/src/collators/basic.rs +++ b/client/consensus/aura/src/collators/basic.rs @@ -181,30 +181,34 @@ pub async fn run( }; let (parachain_inherent_data, other_inherent_data) = try_request!( - collator.create_inherent_data( - *request.relay_parent(), - &validation_data, - parent_hash, - claim.timestamp(), - ).await + collator + .create_inherent_data( + *request.relay_parent(), + &validation_data, + parent_hash, + claim.timestamp(), + ) + .await ); let (collation, _, post_hash) = try_request!( - collator.collate( - &parent_header, - &claim, - None, - (parachain_inherent_data, other_inherent_data), - // TODO [https://github.com/paritytech/cumulus/issues/2439] - // We should call out to a pluggable interface that provides - // the proposal duration. - Duration::from_millis(500), - // Set the block limit to 50% of the maximum PoV size. - // - // TODO: If we got benchmarking that includes the proof size, - // we should be able to use the maximum pov size. - (validation_data.max_pov_size / 2) as usize, - ).await + collator + .collate( + &parent_header, + &claim, + None, + (parachain_inherent_data, other_inherent_data), + // TODO [https://github.com/paritytech/cumulus/issues/2439] + // We should call out to a pluggable interface that provides + // the proposal duration. + Duration::from_millis(500), + // Set the block limit to 50% of the maximum PoV size. + // + // TODO: If we got benchmarking that includes the proof size, + // we should be able to use the maximum pov size. 
+ (validation_data.max_pov_size / 2) as usize, + ) + .await ); let result_sender = Some(collator.collator_service().announce_with_barrier(post_hash)); diff --git a/client/consensus/aura/src/collators/lookahead.rs b/client/consensus/aura/src/collators/lookahead.rs index 70df7462fc5..48b161a19dd 100644 --- a/client/consensus/aura/src/collators/lookahead.rs +++ b/client/consensus/aura/src/collators/lookahead.rs @@ -34,8 +34,7 @@ use codec::{Decode, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{ - self as consensus_common, - ParachainBlockImportMarker, ParachainCandidate, ParentSearchParams, + self as consensus_common, ParachainBlockImportMarker, ParachainCandidate, ParentSearchParams, }; use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_core::{ @@ -46,7 +45,9 @@ use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_node_primitives::{CollationResult, MaybeCompressedPoV}; use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{Block as PBlock, CollatorPair, Header as PHeader, Id as ParaId, OccupiedCoreAssumption}; +use polkadot_primitives::{ + Block as PBlock, CollatorPair, Header as PHeader, Id as ParaId, OccupiedCoreAssumption, +}; use futures::prelude::*; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; @@ -163,17 +164,21 @@ pub async fn run // TODO [now]: get asynchronous backing parameters from the relay-chain // runtime. why? for the parent search parameters. 
- let max_pov_size = match params.relay_client.persisted_validation_data( - relay_parent, - params.para_id, - OccupiedCoreAssumption::Included, - ).await { + let max_pov_size = match params + .relay_client + .persisted_validation_data( + relay_parent, + params.para_id, + OccupiedCoreAssumption::Included, + ) + .await + { Ok(None) => continue, Ok(Some(pvd)) => pvd.max_pov_size, Err(err) => { tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to gather information from relay-client"); - continue; - } + continue + }, }; let (slot_now, timestamp) = match consensus_common::relay_slot_and_timestamp( @@ -220,14 +225,16 @@ pub async fn run let para_client = &*params.para_client; let keystore = ¶ms.keystore; - let can_build_upon = |block_hash| can_build_upon::<_, _, P>( - slot_now, - timestamp, - block_hash, - included_block, - para_client, - &keystore, - ); + let can_build_upon = |block_hash| { + can_build_upon::<_, _, P>( + slot_now, + timestamp, + block_hash, + included_block, + para_client, + &keystore, + ) + }; // Sort by depth, ascending, to choose the longest chain. // @@ -258,31 +265,37 @@ pub async fn run // Build and announce collations recursively until // `can_build_upon` fails or building a collation fails. - let (parachain_inherent_data, other_inherent_data) = match collator.create_inherent_data( - relay_parent, - &validation_data, - parent_hash, - slot_claim.timestamp(), - ).await { + let (parachain_inherent_data, other_inherent_data) = match collator + .create_inherent_data( + relay_parent, + &validation_data, + parent_hash, + slot_claim.timestamp(), + ) + .await + { Err(err) => { tracing::error!(target: crate::LOG_TARGET, ?err); - break; + break }, Ok(x) => x, }; - match collator.collate( - &parent_header, - &slot_claim, - None, - (parachain_inherent_data, other_inherent_data), - params.authoring_duration, - // Set the block limit to 50% of the maximum PoV size. 
- // - // TODO: If we got benchmarking that includes the proof size, - // we should be able to use the maximum pov size. - (validation_data.max_pov_size / 2) as usize, - ).await { + match collator + .collate( + &parent_header, + &slot_claim, + None, + (parachain_inherent_data, other_inherent_data), + params.authoring_duration, + // Set the block limit to 50% of the maximum PoV size. + // + // TODO: If we got benchmarking that includes the proof size, + // we should be able to use the maximum pov size. + (validation_data.max_pov_size / 2) as usize, + ) + .await + { Ok((collation, block_data, new_block_hash)) => { parent_hash = new_block_hash; parent_header = block_data.into_header(); @@ -293,11 +306,11 @@ pub async fn run // TODO [https://github.com/paritytech/polkadot/issues/5056]: // announce collation to relay-chain validators. - } + }, Err(err) => { tracing::error!(target: crate::LOG_TARGET, ?err); - break; - } + break + }, } } } diff --git a/client/consensus/common/src/lib.rs b/client/consensus/common/src/lib.rs index bac7f375216..dbe40405ea7 100644 --- a/client/consensus/common/src/lib.rs +++ b/client/consensus/common/src/lib.rs @@ -15,7 +15,9 @@ // along with Cumulus. If not, see . 
use codec::Decode; -use polkadot_primitives::{Block as PBlock, Hash as PHash, Header as PHeader, PersistedValidationData}; +use polkadot_primitives::{ + Block as PBlock, Hash as PHash, Header as PHeader, PersistedValidationData, +}; use cumulus_primitives_core::{relay_chain::OccupiedCoreAssumption, ParaId}; use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface}; @@ -23,8 +25,8 @@ use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface}; use sc_client_api::Backend; use sc_consensus::{shared_data::SharedData, BlockImport, ImportResult}; use sp_consensus_slots::{Slot, SlotDuration}; -use sp_timestamp::Timestamp; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use sp_timestamp::Timestamp; use std::sync::Arc; @@ -358,10 +360,12 @@ pub fn relay_slot_and_timestamp( relay_parent_header: &PHeader, relay_chain_slot_duration: SlotDuration, ) -> Option<(Slot, Timestamp)> { - sc_consensus_babe::find_pre_digest::(relay_parent_header).map(|babe_pre_digest| { - let slot = babe_pre_digest.slot(); - let t = Timestamp::new(relay_chain_slot_duration.as_millis() * *slot); - - (slot, t) - }).ok() + sc_consensus_babe::find_pre_digest::(relay_parent_header) + .map(|babe_pre_digest| { + let slot = babe_pre_digest.slot(); + let t = Timestamp::new(relay_chain_slot_duration.as_millis() * *slot); + + (slot, t) + }) + .ok() } diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 9f613b81b3b..74ece935267 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -89,14 +89,13 @@ impl BlockAnnounceData { /// /// This will not check the signature, for this you should use [`BlockAnnounceData::check_signature`]. 
fn validate(&self, encoded_header: Vec) -> Result<(), Validation> { - let candidate_hash = if let CompactStatement::Seconded(h) = - self.statement.unchecked_payload() - { - h - } else { - tracing::debug!(target: LOG_TARGET, "`CompactStatement` isn't the candidate variant!",); - return Err(Validation::Failure { disconnect: true }) - }; + let candidate_hash = + if let CompactStatement::Seconded(h) = self.statement.unchecked_payload() { + h + } else { + tracing::debug!(target: LOG_TARGET, "`CompactStatement` isn't the candidate variant!",); + return Err(Validation::Failure { disconnect: true }) + }; if *candidate_hash != self.receipt.hash() { tracing::debug!( @@ -188,7 +187,8 @@ impl TryFrom<&'_ CollationSecondedSignal> for BlockAnnounceData { /// A type alias for the [`RequireSecondedInBlockAnnounce`] validator. #[deprecated = "This has been renamed to RequireSecondedInBlockAnnounce"] -pub type BlockAnnounceValidator = RequireSecondedInBlockAnnounce; +pub type BlockAnnounceValidator = + RequireSecondedInBlockAnnounce; /// Parachain specific block announce validator. /// @@ -342,9 +342,9 @@ where let relay_chain_is_syncing = relay_chain_interface .is_major_syncing() .await - .map_err(|e| { - tracing::error!(target: LOG_TARGET, "Unable to determine sync status. {}", e) - }) + .map_err( + |e| tracing::error!(target: LOG_TARGET, "Unable to determine sync status. 
{}", e), + ) .unwrap_or(false); if relay_chain_is_syncing { From 84b69e9d10b231cc4bf9d9400820b98b7497206d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Jun 2023 14:33:32 -0500 Subject: [PATCH 23/39] clean up unused import errors --- client/consensus/aura/src/collator.rs | 5 +-- client/consensus/aura/src/collators/basic.rs | 38 +++++-------------- .../consensus/aura/src/collators/lookahead.rs | 36 ++++++------------ client/consensus/aura/src/unstable_reimpl.rs | 35 +++-------------- 4 files changed, 29 insertions(+), 85 deletions(-) diff --git a/client/consensus/aura/src/collator.rs b/client/consensus/aura/src/collator.rs index bc9f616ca13..e4344f9934b 100644 --- a/client/consensus/aura/src/collator.rs +++ b/client/consensus/aura/src/collator.rs @@ -37,8 +37,7 @@ use cumulus_primitives_parachain_inherent::ParachainInherentData; use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_node_primitives::{Collation, MaybeCompressedPoV}; -use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{Block as PBlock, Header as PHeader, Id as ParaId}; +use polkadot_primitives::{Header as PHeader, Id as ParaId}; use futures::prelude::*; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction}; @@ -56,7 +55,7 @@ use sp_runtime::{ }; use sp_state_machine::StorageChanges; use sp_timestamp::Timestamp; -use std::{convert::TryFrom, error::Error, fmt::Debug, hash::Hash, sync::Arc, time::Duration}; +use std::{convert::TryFrom, error::Error, hash::Hash, sync::Arc, time::Duration}; /// Parameters for instantiating a [`Collator`]. 
pub struct Params { diff --git a/client/consensus/aura/src/collators/basic.rs b/client/consensus/aura/src/collators/basic.rs index 235a76de1cf..a5f56415420 100644 --- a/client/consensus/aura/src/collators/basic.rs +++ b/client/consensus/aura/src/collators/basic.rs @@ -24,44 +24,28 @@ use codec::{Decode, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; -use cumulus_client_consensus_common::{ - ParachainBlockImportMarker, ParachainCandidate, ParentSearchParams, -}; +use cumulus_client_consensus_common::ParachainBlockImportMarker; use cumulus_client_consensus_proposer::ProposerInterface; -use cumulus_primitives_core::{ - relay_chain::Hash as PHash, CollectCollationInfo, PersistedValidationData, -}; -use cumulus_primitives_parachain_inherent::ParachainInherentData; +use cumulus_primitives_core::CollectCollationInfo; use cumulus_relay_chain_interface::RelayChainInterface; -use polkadot_node_primitives::{CollationResult, MaybeCompressedPoV}; +use polkadot_node_primitives::CollationResult; use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{Block as PBlock, CollatorPair, Header as PHeader, Id as ParaId}; +use polkadot_primitives::{CollatorPair, Id as ParaId}; use futures::prelude::*; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; -use sc_consensus::{ - import_queue::{BasicQueue, Verifier as VerifierT}, - BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction, -}; -use sc_consensus_aura::standalone as aura_internal; -use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; +use sc_consensus::BlockImport; use sp_api::ProvideRuntimeApi; use sp_application_crypto::AppPublic; -use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::HeaderBackend; -use sp_consensus::{error::Error as ConsensusError, BlockOrigin, SyncOracle}; -use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; +use sp_consensus::SyncOracle; +use sp_consensus_aura::{AuraApi, 
SlotDuration}; use sp_core::crypto::Pair; -use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; +use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; -use sp_runtime::{ - generic::Digest, - traits::{Block as BlockT, HashFor, Header as HeaderT, Member}, -}; -use sp_state_machine::StorageChanges; -use sp_timestamp::Timestamp; -use std::{convert::TryFrom, error::Error, fmt::Debug, hash::Hash, sync::Arc, time::Duration}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; +use std::{convert::TryFrom, hash::Hash, sync::Arc, time::Duration}; use crate::collator as collator_util; @@ -107,8 +91,6 @@ pub async fn run( P::Public: AppPublic + Hash + Member + Encode + Decode, P::Signature: TryFrom> + Hash + Member + Encode + Decode, { - let mut params = params; - let mut collation_requests = cumulus_client_collator::relay_chain_driven::init( params.key, params.para_id, diff --git a/client/consensus/aura/src/collators/lookahead.rs b/client/consensus/aura/src/collators/lookahead.rs index 48b161a19dd..dcf7a379327 100644 --- a/client/consensus/aura/src/collators/lookahead.rs +++ b/client/consensus/aura/src/collators/lookahead.rs @@ -34,45 +34,32 @@ use codec::{Decode, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{ - self as consensus_common, ParachainBlockImportMarker, ParachainCandidate, ParentSearchParams, + self as consensus_common, ParachainBlockImportMarker, ParentSearchParams, }; use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_core::{ relay_chain::Hash as PHash, CollectCollationInfo, PersistedValidationData, }; -use cumulus_primitives_parachain_inherent::ParachainInherentData; use cumulus_relay_chain_interface::RelayChainInterface; -use polkadot_node_primitives::{CollationResult, MaybeCompressedPoV}; use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{ - Block 
as PBlock, CollatorPair, Header as PHeader, Id as ParaId, OccupiedCoreAssumption, -}; +use polkadot_primitives::{CollatorPair, Id as ParaId, OccupiedCoreAssumption}; use futures::prelude::*; use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; -use sc_consensus::{ - import_queue::{BasicQueue, Verifier as VerifierT}, - BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction, -}; +use sc_consensus::BlockImport; use sc_consensus_aura::standalone as aura_internal; -use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; use sp_api::ProvideRuntimeApi; use sp_application_crypto::AppPublic; -use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::HeaderBackend; -use sp_consensus::{error::Error as ConsensusError, BlockOrigin, SyncOracle}; +use sp_consensus::SyncOracle; use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; use sp_core::crypto::Pair; -use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; +use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; -use sp_runtime::{ - generic::Digest, - traits::{Block as BlockT, HashFor, Header as HeaderT, Member}, -}; -use sp_state_machine::StorageChanges; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; use sp_timestamp::Timestamp; -use std::{convert::TryFrom, error::Error, fmt::Debug, hash::Hash, sync::Arc, time::Duration}; +use std::{convert::TryFrom, hash::Hash, sync::Arc, time::Duration}; use crate::collator::{self as collator_util, SlotClaim}; @@ -129,8 +116,6 @@ pub async fn run // rules specified by the parachain's runtime and thus will never be too deep. 
const PARENT_SEARCH_DEPTH: usize = 10; - let mut params = params; - let mut import_notifications = match params.relay_client.import_notification_stream().await { Ok(s) => s, Err(err) => { @@ -186,7 +171,7 @@ pub async fn run params.relay_chain_slot_duration, ) { None => continue, - Some((s, t)) => (Slot::from_timestamp(t, params.slot_duration), t), + Some((_, t)) => (Slot::from_timestamp(t, params.slot_duration), t), }; let parent_search_params = ParentSearchParams { @@ -296,13 +281,14 @@ pub async fn run ) .await { - Ok((collation, block_data, new_block_hash)) => { + Ok((_collation, block_data, new_block_hash)) => { parent_hash = new_block_hash; parent_header = block_data.into_header(); // TODO [now]: we should be able to directly announce, as long as // we have full nodes do some equivocation checks locally. - let _sender = params.collator_service.announce_with_barrier(new_block_hash); + // The equivocation checks should allow up to `v + 1` equivocations. + let _sender = collator.collator_service().announce_with_barrier(new_block_hash); // TODO [https://github.com/paritytech/polkadot/issues/5056]: // announce collation to relay-chain validators. 
diff --git a/client/consensus/aura/src/unstable_reimpl.rs b/client/consensus/aura/src/unstable_reimpl.rs index ceb0e42bf88..29d0667591a 100644 --- a/client/consensus/aura/src/unstable_reimpl.rs +++ b/client/consensus/aura/src/unstable_reimpl.rs @@ -17,45 +17,22 @@ // TODO [now]: docs + rename file here use codec::{Decode, Encode}; -use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; -use cumulus_client_consensus_common::{ - ParachainBlockImportMarker, ParachainCandidate, ParentSearchParams, -}; -use cumulus_client_consensus_proposer::ProposerInterface; -use cumulus_primitives_core::{ - relay_chain::Hash as PHash, CollectCollationInfo, PersistedValidationData, -}; -use cumulus_primitives_parachain_inherent::ParachainInherentData; -use cumulus_relay_chain_interface::RelayChainInterface; +use cumulus_client_consensus_common::ParachainBlockImportMarker; -use polkadot_node_primitives::{CollationResult, MaybeCompressedPoV}; -use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{Block as PBlock, CollatorPair, Header as PHeader, Id as ParaId}; - -use futures::prelude::*; -use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; use sc_consensus::{ import_queue::{BasicQueue, Verifier as VerifierT}, - BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction, + BlockImport, BlockImportParams, ForkChoiceStrategy, }; use sc_consensus_aura::standalone as aura_internal; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; use sp_api::ProvideRuntimeApi; -use sp_application_crypto::AppPublic; use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sp_blockchain::HeaderBackend; -use sp_consensus::{error::Error as ConsensusError, BlockOrigin, SyncOracle}; +use sp_consensus::error::Error as ConsensusError; use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; use sp_core::crypto::Pair; -use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; -use 
sp_keystore::KeystorePtr; -use sp_runtime::{ - generic::Digest, - traits::{Block as BlockT, HashFor, Header as HeaderT, Member}, -}; -use sp_state_machine::StorageChanges; -use sp_timestamp::Timestamp; -use std::{convert::TryFrom, error::Error, fmt::Debug, hash::Hash, sync::Arc, time::Duration}; +use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use std::{fmt::Debug, sync::Arc}; struct Verifier { client: Arc, From a3984a7d84dd40b25ce128af4e7d37bac34d5866 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Jun 2023 14:36:50 -0500 Subject: [PATCH 24/39] update references to BlockAnnounceValidator --- client/network/src/lib.rs | 2 +- client/network/src/tests.rs | 4 ++-- client/service/src/lib.rs | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 74ece935267..74cb0461b3b 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -17,7 +17,7 @@ //! Parachain specific networking //! //! Provides a custom block announcement implementation for parachains -//! that use the relay chain provided consensus. See [`BlockAnnounceValidator`] +//! that use the relay chain provided consensus. See [`RequireSecondedInBlockAnnounce`] //! and [`WaitToAnnounce`] for more information about this implementation. 
use sp_consensus::block_validation::{ diff --git a/client/network/src/tests.rs b/client/network/src/tests.rs index f0e34a0fbe6..18fa7511eb3 100644 --- a/client/network/src/tests.rs +++ b/client/network/src/tests.rs @@ -244,10 +244,10 @@ impl RelayChainInterface for DummyRelayChainInterface { } fn make_validator_and_api( -) -> (BlockAnnounceValidator>, Arc) { +) -> (RequireSecondedInBlockAnnounce>, Arc) { let relay_chain_interface = Arc::new(DummyRelayChainInterface::new()); ( - BlockAnnounceValidator::new(relay_chain_interface.clone(), ParaId::from(56)), + RequireSecondedInBlockAnnounce::new(relay_chain_interface.clone(), ParaId::from(56)), relay_chain_interface, ) } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 42b9916d468..f3328343a3e 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -20,7 +20,7 @@ use cumulus_client_cli::CollatorOptions; use cumulus_client_consensus_common::ParachainConsensus; -use cumulus_client_network::BlockAnnounceValidator; +use cumulus_client_network::RequireSecondedInBlockAnnounce; use cumulus_client_pov_recovery::{PoVRecovery, RecoveryDelayRange, RecoveryHandle}; use cumulus_primitives_core::{CollectCollationInfo, ParaId}; use cumulus_relay_chain_inprocess_interface::build_inprocess_relay_chain; @@ -361,7 +361,7 @@ where _ => None, }; - let block_announce_validator = BlockAnnounceValidator::new(relay_chain_interface, para_id); + let block_announce_validator = RequireSecondedInBlockAnnounce::new(relay_chain_interface, para_id); let block_announce_validator_builder = move |_| Box::new(block_announce_validator) as Box<_>; sc_service::build_network(sc_service::BuildNetworkParams { From 7489c94ab92a821d528287440b1a214c9e966571 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Jun 2023 17:28:38 -0500 Subject: [PATCH 25/39] rename unstable_reimpl --- .../{unstable_reimpl.rs => equivocation_import_queue.rs} | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) rename 
client/consensus/aura/src/{unstable_reimpl.rs => equivocation_import_queue.rs} (95%) diff --git a/client/consensus/aura/src/unstable_reimpl.rs b/client/consensus/aura/src/equivocation_import_queue.rs similarity index 95% rename from client/consensus/aura/src/unstable_reimpl.rs rename to client/consensus/aura/src/equivocation_import_queue.rs index 29d0667591a..7d3125518e1 100644 --- a/client/consensus/aura/src/unstable_reimpl.rs +++ b/client/consensus/aura/src/equivocation_import_queue.rs @@ -14,7 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -// TODO [now]: docs + rename file here +/// An import queue which provides some equivocation resistance with lenient trait bounds. +/// +/// Equivocation resistance in general is a hard problem, as different nodes in the network +/// may see equivocations in a different order, and therefore may not agree on which blocks +/// should be thrown out and which ones should be kept. use codec::{Decode, Encode}; use cumulus_client_consensus_common::ParachainBlockImportMarker; From 67ef6c729fa2c6100e217ef5068f1e099359915a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Jun 2023 17:29:40 -0500 Subject: [PATCH 26/39] add AuraUnincludedSegmentApi --- Cargo.lock | 13 +++++++++++ Cargo.toml | 1 + primitives/aura/Cargo.toml | 30 +++++++++++++++++++++++++ primitives/aura/src/lib.rs | 46 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 90 insertions(+) create mode 100644 primitives/aura/Cargo.toml create mode 100644 primitives/aura/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 6a9a277746d..47d77501114 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2576,6 +2576,19 @@ dependencies = [ "xcm", ] +[[package]] +name = "cumulus-primitives-aura" +version = "0.1.0" +dependencies = [ + "parity-scale-codec", + "polkadot-core-primitives", + "polkadot-primitives", + "sp-api", + "sp-consensus-aura", + "sp-runtime", + "sp-std", +] + [[package]] name = 
"cumulus-primitives-core" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 2e96126cfa6..246fea0cba0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,6 +30,7 @@ members = [ "pallets/xcmp-queue", "parachain-template/node", "parachain-template/runtime", + "primitives/aura", "primitives/core", "primitives/parachain-inherent", "primitives/timestamp", diff --git a/primitives/aura/Cargo.toml b/primitives/aura/Cargo.toml new file mode 100644 index 00000000000..ca6eadf25f1 --- /dev/null +++ b/primitives/aura/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "cumulus-primitives-aura" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2021" + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } + +# Substrate +sp-api = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-consensus-aura = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +sp-std = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } + +# Polkadot +polkadot-core-primitives = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "master" } +polkadot-primitives = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "master" } + +[features] +default = [ "std" ] +std = [ + "codec/std", + "sp-api/std", + "sp-consensus-aura/std", + "sp-runtime/std", + "sp-std/std", + "polkadot-core-primitives/std", + "polkadot-primitives/std", +] diff --git a/primitives/aura/src/lib.rs b/primitives/aura/src/lib.rs new file mode 100644 index 00000000000..ef0df181b71 --- /dev/null +++ b/primitives/aura/src/lib.rs @@ -0,0 +1,46 @@ +// Copyright 2023 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Core primitives for Aura in Cumulus. +//! +//! In particular, this exposes the [`AuraUnincludedSegmentApi`] which is used to regulate +//! the behavior of Aura within a parachain context. + +#![cfg_attr(not(feature = "std"), no_std)] + +pub use sp_consensus_aura::Slot; + +sp_api::decl_runtime_apis! { + /// This runtime API is used to inform potential block authors whether they will + /// have the right to author at a slot, assuming they have claimed the slot. + /// + /// In particular, this API allows Aura-based parachains to regulate their "unincluded segment", + /// which is the section of the head of the chain which has not yet been made available in the + /// relay chain. + /// + /// When the unincluded segment is short, Aura chains will allow authors to create multiple + /// blocks per slot in order to build a backlog. When it is saturated, this API will limit + /// the amount of blocks that can be created. + pub trait AuraUnincludedSegmentApi { + /// Whether it is legal to extend the chain, assuming the given block is the most + /// recently included one as-of the relay parent that will be built against, and + /// the given slot. + /// + /// This should be consistent with the logic the runtime uses when validating blocks to + /// avoid issues. 
+ fn can_build_upon(included_hash: Block::Hash, slot: Slot) -> bool; + } +} From 00fa3180160f4083bae1b734253d8e08436d3ba5 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Jun 2023 17:35:33 -0500 Subject: [PATCH 27/39] finish rename --- primitives/aura/src/lib.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/primitives/aura/src/lib.rs b/primitives/aura/src/lib.rs index ef0df181b71..a0d7a0206a6 100644 --- a/primitives/aura/src/lib.rs +++ b/primitives/aura/src/lib.rs @@ -41,6 +41,10 @@ sp_api::decl_runtime_apis! { /// /// This should be consistent with the logic the runtime uses when validating blocks to /// avoid issues. + /// + /// When the unincluded segment is empty, i.e. `included_hash == at`, where at is the block + /// whose state we are querying against, this must always return `true` as long as the slot + /// is more recent than the included block itself. fn can_build_upon(included_hash: Block::Hash, slot: Slot) -> bool; } } From be1e3a75752d095df8e7b44cef85d72f60a6c84b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Jun 2023 17:35:48 -0500 Subject: [PATCH 28/39] integrate AuraUnincludedSegmentApi --- Cargo.lock | 1 + client/consensus/aura/Cargo.toml | 1 + .../consensus/aura/src/collators/lookahead.rs | 19 ++++++++++++------- client/consensus/aura/src/lib.rs | 2 +- 4 files changed, 15 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 47d77501114..8b27416d2fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2222,6 +2222,7 @@ dependencies = [ "cumulus-client-collator", "cumulus-client-consensus-common", "cumulus-client-consensus-proposer", + "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-relay-chain-interface", diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index 7d6da6a09da..fdd1ff34c43 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -36,6 +36,7 @@ substrate-prometheus-endpoint 
= { git = "https://github.com/paritytech/substrate cumulus-client-consensus-common = { path = "../common" } cumulus-relay-chain-interface = { path = "../../relay-chain-interface" } cumulus-client-consensus-proposer = { path = "../proposer" } +cumulus-primitives-aura = { path = "../../../primitives/aura" } cumulus-primitives-core = { path = "../../../primitives/core" } cumulus-primitives-parachain-inherent = { path = "../../../primitives/parachain-inherent" } cumulus-client-collator = { path = "../../collator" } diff --git a/client/consensus/aura/src/collators/lookahead.rs b/client/consensus/aura/src/collators/lookahead.rs index dcf7a379327..43bb49fd383 100644 --- a/client/consensus/aura/src/collators/lookahead.rs +++ b/client/consensus/aura/src/collators/lookahead.rs @@ -40,6 +40,7 @@ use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_core::{ relay_chain::Hash as PHash, CollectCollationInfo, PersistedValidationData, }; +use cumulus_primitives_aura::AuraUnincludedSegmentApi; use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_overseer::Handle as OverseerHandle; @@ -95,7 +96,7 @@ pub async fn run + Send + Sync + 'static, - Client::Api: AuraApi + CollectCollationInfo, + Client::Api: AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi, Backend: sp_blockchain::Backend, RClient: RelayChainInterface, CIDP: CreateInherentDataProviders + 'static, @@ -307,24 +308,28 @@ pub async fn run async fn can_build_upon( slot: Slot, timestamp: Timestamp, - block_hash: Block::Hash, + parent_hash: Block::Hash, included_block: Block::Hash, client: &Client, keystore: &KeystorePtr, ) -> Option> where Client: ProvideRuntimeApi, - Client::Api: AuraApi, + Client::Api: AuraApi + AuraUnincludedSegmentApi, P: Pair, P::Public: Encode + Decode, P::Signature: Encode + Decode, { - let authorities = client.runtime_api().authorities(block_hash).ok()?; + let runtime_api = client.runtime_api(); + let authorities = 
runtime_api.authorities(parent_hash).ok()?; let author_pub = aura_internal::claim_slot::

(slot, &authorities, keystore).await?; - // TODO [now]: new runtime API, - // AuraUnincludedSegmentApi::has_space(included_block, slot) or something like it. - unimplemented!(); + // Here we lean on the property that building on an empty unincluded segment must always + // be legal. Skipping the runtime API query here allows us to seamlessly run this + // collator against chains which have not yet upgraded their runtime. + if parent_hash != included_block { + runtime_api.can_build_upon(parent_hash, included_block, slot).ok()?; + } Some(SlotClaim::unchecked::

(author_pub, slot, timestamp)) } diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 53b61a56355..51d54bf5714 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -52,7 +52,7 @@ pub use sc_consensus_slots::InherentDataProviderExt; pub mod collator; pub mod collators; -pub mod unstable_reimpl; +pub mod equivocation_import_queue; const LOG_TARGET: &str = "aura::cumulus"; From a6a0f802d1b7b7da14e427cd1528f2d7b7db7da3 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Jun 2023 17:51:42 -0500 Subject: [PATCH 29/39] add a new block announcement validator for backwards compatibility --- client/network/src/lib.rs | 48 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 74cb0461b3b..cfe28ef1d60 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -461,3 +461,51 @@ async fn wait_to_announce( ); } } + +/// A [`BlockAnnounceValidator`] which accepts all block announcements, as it assumes +/// sybil resistance is handled elsewhere. +#[derive(Debug, Clone)] +pub struct AssumeSybilResistance(bool); + +impl AssumeSybilResistance { + /// Instantiate this block announcement validator while permissively allowing (but ignoring) + /// announcements which come tagged with seconded messages. + /// + /// This is useful for backwards compatibility when upgrading nodes: old nodes will continue + /// to broadcast announcements with seconded messages, so these announcements shouldn't be ignored + /// and the peers not punished. + pub fn allow_seconded_messages() -> Self { + AssumeSybilResistance(true) + } + + /// Instantiate this block announcement validator while rejecting announcements that come with + /// data. 
+ pub fn reject_seconded_messages() -> Self { + AssumeSybilResistance(false) + } +} + +impl BlockAnnounceValidatorT for AssumeSybilResistance { + fn validate( + &mut self, + _header: &Block::Header, + data: &[u8], + ) -> Pin> + Send>> { + let allow_seconded_messages = self.0; + let data = data.to_vec(); + + async move { + Ok(if data.is_empty() { + Validation::Success { is_new_best: false } + } else if !allow_seconded_messages { + Validation::Failure { disconnect: false } + } else { + match BlockAnnounceData::decode_all(&mut data.as_slice()) { + Ok(_) => Validation::Success { is_new_best: false }, + Err(_) => Validation::Failure { disconnect: true }, + } + }) + } + .boxed() + } +} From 3afbfe73045d88725814142456f107052bb90e5f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Jun 2023 18:05:19 -0500 Subject: [PATCH 30/39] add some naive equivocation defenses --- Cargo.lock | 1 + client/consensus/aura/Cargo.toml | 1 + .../consensus/aura/src/collators/lookahead.rs | 4 +- .../aura/src/equivocation_import_queue.rs | 43 ++++++++++++++++++- 4 files changed, 45 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8b27416d2fe..b82e6f94232 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2227,6 +2227,7 @@ dependencies = [ "cumulus-primitives-parachain-inherent", "cumulus-relay-chain-interface", "futures", + "lru 0.10.0", "parity-scale-codec", "polkadot-node-primitives", "polkadot-overseer", diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index fdd1ff34c43..3599cb9a03d 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -10,6 +10,7 @@ async-trait = "0.1.68" codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] } futures = "0.3.28" tracing = "0.1.37" +lru = "0.10.0" # Substrate sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/client/consensus/aura/src/collators/lookahead.rs 
b/client/consensus/aura/src/collators/lookahead.rs index 43bb49fd383..4b06fb7d9f7 100644 --- a/client/consensus/aura/src/collators/lookahead.rs +++ b/client/consensus/aura/src/collators/lookahead.rs @@ -112,7 +112,7 @@ pub async fn run // This is an arbitrary value which is likely guaranteed to exceed any reasonable // limit, as it would correspond to 10 non-included blocks. // - // Since we only search for parent blocks which have already been included, + // Since we only search for parent blocks which have already been imported, // we can guarantee that all imported blocks respect the unincluded segment // rules specified by the parachain's runtime and thus will never be too deep. const PARENT_SEARCH_DEPTH: usize = 10; @@ -344,6 +344,6 @@ async fn max_ancestry_lookback( // // For now, just provide the conservative value of '2'. // Overestimating can cause problems, as we'd be building on forks of the - // chain that never get included. Underestimating is less of an issue. + // chain that can never get included. Underestimating is less of an issue. 2 } diff --git a/client/consensus/aura/src/equivocation_import_queue.rs b/client/consensus/aura/src/equivocation_import_queue.rs index 7d3125518e1..8b38221d03c 100644 --- a/client/consensus/aura/src/equivocation_import_queue.rs +++ b/client/consensus/aura/src/equivocation_import_queue.rs @@ -21,6 +21,7 @@ /// should be thrown out and which ones should be kept. 
use codec::{Decode, Encode}; +use lru::LruCache; use cumulus_client_consensus_common::ParachainBlockImportMarker; use sc_consensus::{ @@ -36,12 +37,41 @@ use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; use sp_core::crypto::Pair; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; -use std::{fmt::Debug, sync::Arc}; +use std::{fmt::Debug, num::NonZeroUsize, sync::Arc}; + +const LRU_WINDOW: usize = 256; +const EQUIVOCATION_LIMIT: usize = 16; + +struct NaiveEquivocationDefender { + cache: LruCache, +} + +impl Default for NaiveEquivocationDefender { + fn default() -> Self { + NaiveEquivocationDefender { + cache: LruCache::new(NonZeroUsize::new(LRU_WINDOW).expect("window > 0; qed")), + } + } +} + +impl NaiveEquivocationDefender { + // return `true` if equivocation is beyond the limit. + fn insert_and_check(&mut self, slot: Slot) -> bool { + let val = self.cache.get_or_insert_mut(*slot, || 0); + if *val == EQUIVOCATION_LIMIT { + true + } else { + *val += 1; + false + } + } +} struct Verifier { client: Arc, create_inherent_data_providers: CIDP, slot_duration: SlotDuration, + defender: NaiveEquivocationDefender, telemetry: Option, _marker: std::marker::PhantomData<(Block, P)>, } @@ -88,7 +118,7 @@ where ); match res { - Ok((pre_header, _slot, seal_digest)) => { + Ok((pre_header, slot, seal_digest)) => { telemetry!( self.telemetry; CONSENSUS_TRACE; @@ -100,6 +130,14 @@ where block_params.post_digests.push(seal_digest); block_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); block_params.post_hash = Some(post_hash); + + // Check for and reject egregious amounts of equivocations. 
+ if self.defender.insert_and_check(slot) { + return Err(format!( + "Rejecting block {:?} due to excessive equivocations at slot", + post_hash, + )); + } }, Err(aura_internal::SealVerificationError::Deferred(hdr, slot)) => { telemetry!( @@ -207,6 +245,7 @@ where let verifier = Verifier:: { client, create_inherent_data_providers, + defender: NaiveEquivocationDefender::default(), slot_duration, telemetry, _marker: std::marker::PhantomData, From 89af9a6beeab877138a3719a1c83d22fddfd128b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Jun 2023 18:05:37 -0500 Subject: [PATCH 31/39] rustfmt --- client/consensus/aura/src/collators/lookahead.rs | 5 +++-- client/consensus/aura/src/equivocation_import_queue.rs | 5 ++--- client/network/src/tests.rs | 6 ++++-- client/service/src/lib.rs | 3 ++- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/client/consensus/aura/src/collators/lookahead.rs b/client/consensus/aura/src/collators/lookahead.rs index 4b06fb7d9f7..1ec21fb0a84 100644 --- a/client/consensus/aura/src/collators/lookahead.rs +++ b/client/consensus/aura/src/collators/lookahead.rs @@ -37,10 +37,10 @@ use cumulus_client_consensus_common::{ self as consensus_common, ParachainBlockImportMarker, ParentSearchParams, }; use cumulus_client_consensus_proposer::ProposerInterface; +use cumulus_primitives_aura::AuraUnincludedSegmentApi; use cumulus_primitives_core::{ relay_chain::Hash as PHash, CollectCollationInfo, PersistedValidationData, }; -use cumulus_primitives_aura::AuraUnincludedSegmentApi; use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_overseer::Handle as OverseerHandle; @@ -96,7 +96,8 @@ pub async fn run + Send + Sync + 'static, - Client::Api: AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi, + Client::Api: + AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi, Backend: sp_blockchain::Backend, RClient: RelayChainInterface, CIDP: CreateInherentDataProviders + 'static, diff --git 
a/client/consensus/aura/src/equivocation_import_queue.rs b/client/consensus/aura/src/equivocation_import_queue.rs index 8b38221d03c..ac9c2a52829 100644 --- a/client/consensus/aura/src/equivocation_import_queue.rs +++ b/client/consensus/aura/src/equivocation_import_queue.rs @@ -19,10 +19,9 @@ /// Equivocation resistance in general is a hard problem, as different nodes in the network /// may see equivocations in a different order, and therefore may not agree on which blocks /// should be thrown out and which ones should be kept. - use codec::{Decode, Encode}; -use lru::LruCache; use cumulus_client_consensus_common::ParachainBlockImportMarker; +use lru::LruCache; use sc_consensus::{ import_queue::{BasicQueue, Verifier as VerifierT}, @@ -136,7 +135,7 @@ where return Err(format!( "Rejecting block {:?} due to excessive equivocations at slot", post_hash, - )); + )) } }, Err(aura_internal::SealVerificationError::Deferred(hdr, slot)) => { diff --git a/client/network/src/tests.rs b/client/network/src/tests.rs index 18fa7511eb3..d9254634dd4 100644 --- a/client/network/src/tests.rs +++ b/client/network/src/tests.rs @@ -243,8 +243,10 @@ impl RelayChainInterface for DummyRelayChainInterface { } } -fn make_validator_and_api( -) -> (RequireSecondedInBlockAnnounce>, Arc) { +fn make_validator_and_api() -> ( + RequireSecondedInBlockAnnounce>, + Arc, +) { let relay_chain_interface = Arc::new(DummyRelayChainInterface::new()); ( RequireSecondedInBlockAnnounce::new(relay_chain_interface.clone(), ParaId::from(56)), diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index f3328343a3e..117e203d1ab 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -361,7 +361,8 @@ where _ => None, }; - let block_announce_validator = RequireSecondedInBlockAnnounce::new(relay_chain_interface, para_id); + let block_announce_validator = + RequireSecondedInBlockAnnounce::new(relay_chain_interface, para_id); let block_announce_validator_builder = move |_| 
Box::new(block_announce_validator) as Box<_>; sc_service::build_network(sc_service::BuildNetworkParams { From b7d8a9cd55c4724e462a008126281aa435a0d102 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 7 Jun 2023 18:18:46 -0500 Subject: [PATCH 32/39] clean up remaining TODO [now]s --- .../consensus/aura/src/collators/lookahead.rs | 12 ++++------ client/consensus/common/src/lib.rs | 24 ++++++++++--------- 2 files changed, 17 insertions(+), 19 deletions(-) diff --git a/client/consensus/aura/src/collators/lookahead.rs b/client/consensus/aura/src/collators/lookahead.rs index 1ec21fb0a84..50fdb8b34ba 100644 --- a/client/consensus/aura/src/collators/lookahead.rs +++ b/client/consensus/aura/src/collators/lookahead.rs @@ -148,9 +148,6 @@ pub async fn run while let Some(relay_parent_header) = import_notifications.next().await { let relay_parent = relay_parent_header.hash(); - // TODO [now]: get asynchronous backing parameters from the relay-chain - // runtime. why? for the parent search parameters. - let max_pov_size = match params .relay_client .persisted_validation_data( @@ -287,10 +284,9 @@ pub async fn run parent_hash = new_block_hash; parent_header = block_data.into_header(); - // TODO [now]: we should be able to directly announce, as long as - // we have full nodes do some equivocation checks locally. - // The equivocation checks should allow up to `v + 1` equivocations. - let _sender = collator.collator_service().announce_with_barrier(new_block_hash); + // Here we are assuming that the import logic protects against equivocations + // and provides sybil-resistance, as it should. + collator.collator_service().announce_block(new_block_hash, None); // TODO [https://github.com/paritytech/polkadot/issues/5056]: // announce collation to relay-chain validators. 
@@ -339,7 +335,7 @@ async fn max_ancestry_lookback( _relay_parent: PHash, _relay_client: &impl RelayChainInterface, ) -> usize { - // TODO [https://github.com/paritytech/polkadot/pull/5022] + // TODO [https://github.com/paritytech/cumulus/issues/2706] // We need to read the relay-chain state to know what the maximum // age truly is, but that depends on those pallets existing. // diff --git a/client/consensus/common/src/lib.rs b/client/consensus/common/src/lib.rs index dbe40405ea7..278d8affc7b 100644 --- a/client/consensus/common/src/lib.rs +++ b/client/consensus/common/src/lib.rs @@ -208,11 +208,9 @@ pub struct PotentialParent { pub header: B::Header, /// The depth of the block. pub depth: usize, - /// Whether the block descends from the block pending availability. - /// - /// This is false for the last included block as well as the block pending availability itself. - // TODO [now]: change this to be true for the pending blocks themselves. - pub descends_from_pending: bool, + /// Whether the block is the included block, is itself pending on-chain, or descends + /// from the block pending availability. + pub aligned_with_pending: bool, } /// Perform a recursive search through blocks to find potential @@ -295,7 +293,7 @@ pub async fn find_potential_parents( hash: included_hash, header: included_header, depth: 0, - descends_from_pending: false, + aligned_with_pending: true, }]; // Recursive search through descendants of the included block which have acceptable @@ -317,7 +315,7 @@ pub async fn find_potential_parents( .map_or(false, is_root_in_ancestry) }; - let descends_from_pending = entry.descends_from_pending; + let parent_aligned_with_pending = entry.aligned_with_pending; let child_depth = entry.depth + 1; let hash = entry.hash; @@ -331,9 +329,13 @@ pub async fn find_potential_parents( // push children onto search frontier. 
for child in client.children(hash).ok().into_iter().flat_map(|c| c) { - if params.ignore_alternative_branches && - is_included && pending_hash.map_or(false, |h| child != h) - { + let aligned_with_pending = parent_aligned_with_pending && if child_depth == 1 { + pending_hash.as_ref().map_or(true, |h| &child == h) + } else { + true + }; + + if params.ignore_alternative_branches && !aligned_with_pending { continue } @@ -347,7 +349,7 @@ pub async fn find_potential_parents( hash: child, header, depth: child_depth, - descends_from_pending: is_pending || descends_from_pending, + aligned_with_pending, }); } } From f56b39262677efe4abb86c3628e10be8cf177f39 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 19 Jun 2023 14:44:00 -0700 Subject: [PATCH 33/39] fmt --- client/consensus/common/src/lib.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/client/consensus/common/src/lib.rs b/client/consensus/common/src/lib.rs index 278d8affc7b..f0016779295 100644 --- a/client/consensus/common/src/lib.rs +++ b/client/consensus/common/src/lib.rs @@ -329,11 +329,12 @@ pub async fn find_potential_parents( // push children onto search frontier. 
for child in client.children(hash).ok().into_iter().flat_map(|c| c) { - let aligned_with_pending = parent_aligned_with_pending && if child_depth == 1 { - pending_hash.as_ref().map_or(true, |h| &child == h) - } else { - true - }; + let aligned_with_pending = parent_aligned_with_pending && + if child_depth == 1 { + pending_hash.as_ref().map_or(true, |h| &child == h) + } else { + true + }; if params.ignore_alternative_branches && !aligned_with_pending { continue From 0101f91c5d36fb36237792c1dd008536e67621ae Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 19 Jun 2023 17:37:42 -0700 Subject: [PATCH 34/39] try to fix inprocess-interface --- client/relay-chain-inprocess-interface/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/relay-chain-inprocess-interface/src/lib.rs b/client/relay-chain-inprocess-interface/src/lib.rs index 627b2b97992..9c97fb27f39 100644 --- a/client/relay-chain-inprocess-interface/src/lib.rs +++ b/client/relay-chain-inprocess-interface/src/lib.rs @@ -172,7 +172,7 @@ where } async fn header(&self, block_id: PHash) -> RelayChainResult> { - Ok(self.backend.header(block_hash)?) + Ok(self.backend.header(block_id)?) } fn overseer_handle(&self) -> RelayChainResult { From e00621fef26bbaf9ebdd6646936101a21e696ac2 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 19 Jun 2023 18:20:28 -0700 Subject: [PATCH 35/39] actually fix compilation --- client/relay-chain-inprocess-interface/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/relay-chain-inprocess-interface/src/lib.rs b/client/relay-chain-inprocess-interface/src/lib.rs index 9c97fb27f39..c9b8bdf149c 100644 --- a/client/relay-chain-inprocess-interface/src/lib.rs +++ b/client/relay-chain-inprocess-interface/src/lib.rs @@ -172,7 +172,7 @@ where } async fn header(&self, block_id: PHash) -> RelayChainResult> { - Ok(self.backend.header(block_id)?) + Ok(self.backend.blockchain().header(block_id)?) 
} fn overseer_handle(&self) -> RelayChainResult { From 70caa2034e89f28cd5e7f11e4a360ad89d1b29bf Mon Sep 17 00:00:00 2001 From: asynchronous rob Date: Mon, 3 Jul 2023 14:51:11 +0200 Subject: [PATCH 36/39] ignored -> rejected rephrase --- client/network/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index cfe28ef1d60..7783ba13b6c 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -472,7 +472,7 @@ impl AssumeSybilResistance { /// announcements which come tagged with seconded messages. /// /// This is useful for backwards compatibility when upgrading nodes: old nodes will continue - /// to broadcast announcements with seconded messages, so these announcements shouldn't be ignored + /// to broadcast announcements with seconded messages, so these announcements shouldn't be rejected /// and the peers not punished. pub fn allow_seconded_messages() -> Self { AssumeSybilResistance(true) From 086eff00455bf0158c145382b4518cd160e7ab50 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 5 Jul 2023 14:04:22 +0200 Subject: [PATCH 37/39] fix test compilation --- client/consensus/common/src/tests.rs | 2 +- client/relay-chain-inprocess-interface/src/lib.rs | 11 ++++------- client/relay-chain-rpc-interface/src/lib.rs | 12 ++++-------- 3 files changed, 9 insertions(+), 16 deletions(-) diff --git a/client/consensus/common/src/tests.rs b/client/consensus/common/src/tests.rs index e8963a2cfbf..0777d6eb694 100644 --- a/client/consensus/common/src/tests.rs +++ b/client/consensus/common/src/tests.rs @@ -185,7 +185,7 @@ impl RelayChainInterface for Relaychain { unimplemented!("Not needed for test") } - async fn wait_for_block(&self, _: PHash) -> RelayChainResult<()> {} + async fn wait_for_block(&self, _: PHash) -> RelayChainResult<()> { Ok(()) } async fn new_best_notification_stream( &self, diff --git a/client/relay-chain-inprocess-interface/src/lib.rs 
b/client/relay-chain-inprocess-interface/src/lib.rs index de4446b575f..a7bd39f7e6f 100644 --- a/client/relay-chain-inprocess-interface/src/lib.rs +++ b/client/relay-chain-inprocess-interface/src/lib.rs @@ -94,9 +94,10 @@ impl RelayChainInterface for RelayChainInProcessInterface { async fn header(&self, block_id: BlockId) -> RelayChainResult> { let hash = match block_id { BlockId::Hash(hash) => hash, - BlockId::Number(num) => self.full_client.hash(num)?.ok_or_else(|| { - RelayChainError::GenericError(format!("block with number {num} not found")) - })?, + BlockId::Number(num) => match self.full_client.hash(num)? { + None => return Ok(None), + Some(h) => h, + } }; let header = self.full_client.header(hash)?; @@ -164,10 +165,6 @@ impl RelayChainInterface for RelayChainInProcessInterface { Ok(self.sync_oracle.is_major_syncing()) } - async fn header(&self, block_id: PHash) -> RelayChainResult> { - Ok(self.backend.blockchain().header(block_id)?) - } - fn overseer_handle(&self) -> RelayChainResult { Ok(self.overseer_handle.clone()) } diff --git a/client/relay-chain-rpc-interface/src/lib.rs b/client/relay-chain-rpc-interface/src/lib.rs index ae10c39df71..4456c80e360 100644 --- a/client/relay-chain-rpc-interface/src/lib.rs +++ b/client/relay-chain-rpc-interface/src/lib.rs @@ -81,10 +81,10 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn header(&self, block_id: BlockId) -> RelayChainResult> { let hash = match block_id { BlockId::Hash(hash) => hash, - BlockId::Number(num) => - self.rpc_client.chain_get_block_hash(Some(num)).await?.ok_or_else(|| { - RelayChainError::GenericError(format!("block with number {num} not found")) - })?, + BlockId::Number(num) => match self.rpc_client.chain_get_block_hash(Some(num)).await? 
{ + None => return Ok(None), + Some(h) => h, + }, }; let header = self.rpc_client.chain_get_header(Some(hash)).await?; @@ -148,10 +148,6 @@ impl RelayChainInterface for RelayChainRpcInterface { self.rpc_client.system_health().await.map(|h| h.is_syncing) } - async fn header(&self, block_id: RelayHash) -> RelayChainResult> { - self.rpc_client.chain_get_header(Some(block_id)).await - } - fn overseer_handle(&self) -> RelayChainResult { Ok(self.overseer_handle.clone()) } From e2319c2f34c9dcb32647debfba22b8cce4c18833 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 5 Jul 2023 15:04:18 +0200 Subject: [PATCH 38/39] fmt --- client/consensus/common/src/tests.rs | 4 +++- client/relay-chain-inprocess-interface/src/lib.rs | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/client/consensus/common/src/tests.rs b/client/consensus/common/src/tests.rs index 0777d6eb694..c13f839ad82 100644 --- a/client/consensus/common/src/tests.rs +++ b/client/consensus/common/src/tests.rs @@ -185,7 +185,9 @@ impl RelayChainInterface for Relaychain { unimplemented!("Not needed for test") } - async fn wait_for_block(&self, _: PHash) -> RelayChainResult<()> { Ok(()) } + async fn wait_for_block(&self, _: PHash) -> RelayChainResult<()> { + Ok(()) + } async fn new_best_notification_stream( &self, diff --git a/client/relay-chain-inprocess-interface/src/lib.rs b/client/relay-chain-inprocess-interface/src/lib.rs index a7bd39f7e6f..9dfe9e9a4f1 100644 --- a/client/relay-chain-inprocess-interface/src/lib.rs +++ b/client/relay-chain-inprocess-interface/src/lib.rs @@ -97,7 +97,7 @@ impl RelayChainInterface for RelayChainInProcessInterface { BlockId::Number(num) => match self.full_client.hash(num)? 
{ None => return Ok(None), Some(h) => h, - } + }, }; let header = self.full_client.header(hash)?; From d0ab00ce94c35967cfec136599a62cfd9dba4cbf Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 11 Jul 2023 12:53:17 +0200 Subject: [PATCH 39/39] clippy --- client/consensus/aura/src/collator.rs | 2 +- client/consensus/common/src/lib.rs | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/client/consensus/aura/src/collator.rs b/client/consensus/aura/src/collator.rs index e4344f9934b..aa990ae6d3a 100644 --- a/client/consensus/aura/src/collator.rs +++ b/client/consensus/aura/src/collator.rs @@ -232,7 +232,7 @@ where Ok((collation, block_data, post_hash)) } else { - Err(format!("Unable to produce collation").into()) + Err("Unable to produce collation".to_string().into()) } } diff --git a/client/consensus/common/src/lib.rs b/client/consensus/common/src/lib.rs index 1f71d3e5dfa..48ac4e96344 100644 --- a/client/consensus/common/src/lib.rs +++ b/client/consensus/common/src/lib.rs @@ -252,8 +252,8 @@ pub async fn find_potential_parents( Some(h) => h, }; - ancestry.push((current_rp, header.state_root().clone())); - current_rp = header.parent_hash().clone(); + ancestry.push((current_rp, *header.state_root())); + current_rp = *header.parent_hash(); // don't iterate back into the genesis block. if header.number == 1 { @@ -295,7 +295,7 @@ pub async fn find_potential_parents( Some(x) => x, }; // Silently swallow if pending block can't decode. - let pending_header = pending_header.map(|p| B::Header::decode(&mut &p.0[..]).ok()).flatten(); + let pending_header = pending_header.and_then(|p| B::Header::decode(&mut &p.0[..]).ok()); let included_hash = included_header.hash(); let pending_hash = pending_header.as_ref().map(|hdr| hdr.hash()); @@ -338,7 +338,7 @@ pub async fn find_potential_parents( } // push children onto search frontier. 
- for child in client.children(hash).ok().into_iter().flat_map(|c| c) { + for child in client.children(hash).ok().into_iter().flatten() { let aligned_with_pending = parent_aligned_with_pending && if child_depth == 1 { pending_hash.as_ref().map_or(true, |h| &child == h)