-
Notifications
You must be signed in to change notification settings - Fork 85
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
refactor(sync): make changes to allow easily adding more types to sync
- Loading branch information
1 parent
bade592
commit 8507d96
Showing
5 changed files
with
201 additions
and
86 deletions.
There are no files selected for viewing
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,81 @@ | ||
use std::pin::Pin; | ||
|
||
use futures::future::BoxFuture; | ||
use futures::{FutureExt, Stream, StreamExt}; | ||
use papyrus_network::{DataType, SignedBlockHeader}; | ||
use papyrus_storage::header::{HeaderStorageReader, HeaderStorageWriter}; | ||
use papyrus_storage::{StorageError, StorageReader, StorageWriter}; | ||
use starknet_api::block::BlockNumber; | ||
use tracing::debug; | ||
|
||
use crate::stream_factory::{BlockData, BlockNumberLimit, DataStreamFactory}; | ||
use crate::{P2PSyncError, ALLOWED_SIGNATURES_LENGTH, NETWORK_DATA_TIMEOUT}; | ||
|
||
impl BlockData for SignedBlockHeader { | ||
fn write_to_storage(&self, storage_writer: &mut StorageWriter) -> Result<(), StorageError> { | ||
storage_writer | ||
.begin_rw_txn()? | ||
.append_header(self.block_header.block_number, &self.block_header)? | ||
.append_block_signature( | ||
self.block_header.block_number, | ||
self | ||
.signatures | ||
// In the future we will support multiple signatures. | ||
.first() | ||
// The verification that the size of the vector is 1 is done in the data | ||
// verification. | ||
.expect("Vec::first should return a value on a vector of size 1"), | ||
)? | ||
.commit() | ||
} | ||
} | ||
|
||
/// Stream factory for syncing signed block headers from the network.
pub(crate) struct HeaderStreamFactory;
|
||
impl DataStreamFactory for HeaderStreamFactory { | ||
type InputFromNetwork = SignedBlockHeader; | ||
type Output = SignedBlockHeader; | ||
|
||
const DATA_TYPE: DataType = DataType::SignedBlockHeader; | ||
const BLOCK_NUMBER_LIMIT: BlockNumberLimit = BlockNumberLimit::Unlimited; | ||
const SHOULD_LOG_ADDED_BLOCK: bool = true; | ||
|
||
fn parse_data_for_block<'a>( | ||
signed_headers_receiver: &'a mut Pin< | ||
Box<dyn Stream<Item = Option<Self::InputFromNetwork>> + Send>, | ||
>, | ||
block_number: BlockNumber, | ||
_storage_reader: &'a StorageReader, | ||
) -> BoxFuture<'a, Result<Option<Self::Output>, P2PSyncError>> { | ||
async move { | ||
let maybe_signed_header_stream_result = | ||
tokio::time::timeout(NETWORK_DATA_TIMEOUT, signed_headers_receiver.next()).await?; | ||
let Some(maybe_signed_header) = maybe_signed_header_stream_result else { | ||
return Err(P2PSyncError::ReceiverChannelTerminated); | ||
}; | ||
let Some(signed_block_header) = maybe_signed_header else { | ||
debug!("Header query sent to network finished"); | ||
return Ok(None); | ||
}; | ||
// TODO(shahak): Check that parent_hash is the same as the previous block's hash | ||
// and handle reverts. | ||
if block_number != signed_block_header.block_header.block_number { | ||
return Err(P2PSyncError::HeadersUnordered { | ||
expected_block_number: block_number, | ||
actual_block_number: signed_block_header.block_header.block_number, | ||
}); | ||
} | ||
if signed_block_header.signatures.len() != ALLOWED_SIGNATURES_LENGTH { | ||
return Err(P2PSyncError::WrongSignaturesLength { | ||
signatures: signed_block_header.signatures, | ||
}); | ||
} | ||
Ok(Some(signed_block_header)) | ||
} | ||
.boxed() | ||
} | ||
|
||
fn get_start_block_number(storage_reader: &StorageReader) -> Result<BlockNumber, StorageError> { | ||
storage_reader.begin_ro_txn()?.get_header_marker() | ||
} | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,99 @@ | ||
use std::pin::Pin; | ||
use std::time::Duration; | ||
|
||
use async_stream::stream; | ||
use futures::channel::mpsc::Sender; | ||
use futures::future::BoxFuture; | ||
use futures::stream::BoxStream; | ||
use futures::{SinkExt, Stream, StreamExt}; | ||
use papyrus_network::{DataType, Direction, Query}; | ||
use papyrus_storage::{StorageError, StorageReader, StorageWriter}; | ||
use starknet_api::block::BlockNumber; | ||
use tracing::{debug, info}; | ||
|
||
use crate::{P2PSyncError, STEP}; | ||
|
||
/// A piece of per-block data that knows how to persist itself to storage.
pub(crate) trait BlockData: Send {
    /// Writes this data to storage. Implementors are expected to manage their own
    /// transaction (the header implementation in this file begins and commits one).
    fn write_to_storage(&self, storage_writer: &mut StorageWriter) -> Result<(), StorageError>;
}
|
||
/// Policy for the highest block number a data stream is allowed to request.
pub(crate) enum BlockNumberLimit {
    /// No upper bound on the queried block numbers.
    Unlimited,
    // TODO(shahak): Add variant for header marker once we support state diff sync.
    // TODO(shahak): Add variant for state diff marker once we support classes sync.
}
|
||
/// Factory describing how to sync one data type (e.g. headers): what to query the
/// network for, how to parse each response, and where in storage to resume from.
/// `create_stream` provides the query/parse/yield loop shared by all data types.
pub(crate) trait DataStreamFactory {
    /// The raw item type received from the network for this data type.
    type InputFromNetwork: Send + 'static;
    /// The parsed item type yielded by the stream; it must be writable to storage.
    type Output: BlockData + 'static;

    /// The data type to request in network queries.
    const DATA_TYPE: DataType;
    /// Limit policy for queried block numbers.
    /// NOTE(review): not read inside this trait's default implementation — presumably
    /// consumed by the caller; confirm at the call sites.
    const BLOCK_NUMBER_LIMIT: BlockNumberLimit;
    /// Whether to emit an info-level log line for every block successfully parsed.
    const SHOULD_LOG_ADDED_BLOCK: bool;

    // Async functions in trait don't work well with argument references
    /// Parses and validates the data for `block_number` from `data_receiver`.
    /// Returns `Ok(None)` when the current query's responses have been exhausted.
    fn parse_data_for_block<'a>(
        data_receiver: &'a mut Pin<Box<dyn Stream<Item = Option<Self::InputFromNetwork>> + Send>>,
        block_number: BlockNumber,
        storage_reader: &'a StorageReader,
    ) -> BoxFuture<'a, Result<Option<Self::Output>, P2PSyncError>>;

    /// Returns the block number to resume syncing from, based on storage state.
    fn get_start_block_number(storage_reader: &StorageReader) -> Result<BlockNumber, StorageError>;

    /// Builds the sync stream: repeatedly sends a query covering
    /// `num_blocks_per_query` blocks, parses each response, and yields the parsed
    /// data boxed as `BlockData`. On partial responses, waits
    /// `wait_period_for_new_data` and re-queries from the current position.
    fn create_stream(
        mut data_receiver: Pin<Box<dyn Stream<Item = Option<Self::InputFromNetwork>> + Send>>,
        mut query_sender: Sender<Query>,
        storage_reader: StorageReader,
        wait_period_for_new_data: Duration,
        num_blocks_per_query: usize,
    ) -> BoxStream<'static, Result<Box<dyn BlockData>, P2PSyncError>> {
        stream! {
            let mut current_block_number = Self::get_start_block_number(&storage_reader)?;
            'send_query_and_parse_responses: loop {
                // Exclusive upper bound of the block range covered by the next query.
                let end_block_number = current_block_number.0
                    + u64::try_from(num_blocks_per_query)
                        .expect("Failed converting usize to u64");
                debug!("Downloading {:?} for blocks [{}, {})", Self::DATA_TYPE, current_block_number.0, end_block_number);
                query_sender
                    .send(Query {
                        start_block: current_block_number,
                        direction: Direction::Forward,
                        limit: num_blocks_per_query,
                        step: STEP,
                        data_type: Self::DATA_TYPE,
                    })
                    .await?;

                // Parse one response per expected block in the queried range.
                while current_block_number.0 < end_block_number {
                    match Self::parse_data_for_block(
                        &mut data_receiver, current_block_number, &storage_reader
                    ).await? {
                        Some(output) => yield Ok(Box::<dyn BlockData>::from(Box::new(output))),
                        None => {
                            // The query ended before covering the whole range; back off
                            // and re-query starting from the current block number.
                            debug!(
                                "Query for {:?} returned with partial data. Waiting {:?} before \
                                sending another query.",
                                Self::DATA_TYPE,
                                wait_period_for_new_data
                            );
                            tokio::time::sleep(wait_period_for_new_data).await;
                            continue 'send_query_and_parse_responses;
                        }
                    }
                    if Self::SHOULD_LOG_ADDED_BLOCK {
                        info!("Added block {}.", current_block_number);
                    }
                    current_block_number = current_block_number.next();
                }

                // Consume the None message signaling the end of the query.
                match data_receiver.next().await {
                    Some(None) => {},
                    Some(Some(_)) => Err(P2PSyncError::TooManyResponses)?,
                    None => Err(P2PSyncError::ReceiverChannelTerminated)?,
                }
            }
        }
        .boxed()
    }
}