From 22d541f13a5d4b3b2e5dcd2389992e0befc631d6 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Wed, 2 Aug 2023 22:16:38 +0300 Subject: [PATCH 1/5] Downgrade debug networking messages --- crates/subspace-networking/src/node_runner.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/crates/subspace-networking/src/node_runner.rs b/crates/subspace-networking/src/node_runner.rs index 470f829daa..deb8fe7a56 100644 --- a/crates/subspace-networking/src/node_runner.rs +++ b/crates/subspace-networking/src/node_runner.rs @@ -551,10 +551,12 @@ where }; } + debug!(?peer_id, "SwarmEvent::OutgoingConnectionError for peer."); + match error { DialError::Transport(ref addresses) => { for (addr, _) in addresses { - debug!(?error, ?peer_id, %addr, "SwarmEvent::OutgoingConnectionError (DialError::Transport) for peer."); + trace!(?error, ?peer_id, %addr, "SwarmEvent::OutgoingConnectionError (DialError::Transport) for peer."); if let Some(peer_id) = peer_id { self.networking_parameters_registry .remove_known_peer_addresses(peer_id, vec![addr.clone()]) @@ -563,7 +565,7 @@ where } } DialError::WrongPeerId { obtained, .. 
} => { - debug!(?error, ?peer_id, obtained_peer_id=?obtained, "SwarmEvent::WrongPeerId (DialError::WrongPeerId) for peer."); + trace!(?error, ?peer_id, obtained_peer_id=?obtained, "SwarmEvent::WrongPeerId (DialError::WrongPeerId) for peer."); if let Some(ref peer_id) = peer_id { let kademlia = &mut self.swarm.behaviour_mut().kademlia; @@ -571,7 +573,7 @@ where } } _ => { - debug!(?error, ?peer_id, "SwarmEvent::OutgoingConnectionError"); + trace!(?error, ?peer_id, "SwarmEvent::OutgoingConnectionError"); } } } From d75ef2187eca5b637d6144cdc3bf5192454cf881 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Fri, 4 Aug 2023 15:29:41 +0300 Subject: [PATCH 2/5] Improve sync effectiveness by checking last 2 segment headers only --- .../subspace-service/src/dsn/import_blocks/segment_headers.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/subspace-service/src/dsn/import_blocks/segment_headers.rs b/crates/subspace-service/src/dsn/import_blocks/segment_headers.rs index 2e3832bd3d..9aa16fa04e 100644 --- a/crates/subspace-service/src/dsn/import_blocks/segment_headers.rs +++ b/crates/subspace-service/src/dsn/import_blocks/segment_headers.rs @@ -113,7 +113,9 @@ impl SegmentHeaderHandler { .send_generic_request( peer_id, SegmentHeaderRequest::LastSegmentHeaders { - segment_header_number: SEGMENT_HEADER_NUMBER_PER_REQUEST, + // Request 2 top segment headers, accounting for situations when new + // segment header was just produced and not all nodes have it + segment_header_number: 2, }, ) .await; From 2d4f2a4b61c7a1449033d8f06bb74f19487f3d9b Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Sun, 6 Aug 2023 03:24:53 +0300 Subject: [PATCH 3/5] Allow customizing cache size for farmers with CLI argument --- .../src/bin/subspace-farmer/commands/farm.rs | 2 ++ .../src/bin/subspace-farmer/main.rs | 20 +++++++++++--- .../subspace-farmer/src/single_disk_plot.rs | 27 ++++++++++++++----- 3 files changed, 38 insertions(+), 11 deletions(-) diff --git 
a/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs index fcfb037bd3..311be5ed62 100644 --- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs +++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs @@ -62,6 +62,7 @@ where disable_farming, mut dsn, max_concurrent_plots, + cache_percentage, no_info: _, } = farming_args; @@ -180,6 +181,7 @@ where kzg: kzg.clone(), erasure_coding: erasure_coding.clone(), piece_getter: piece_getter.clone(), + cache_percentage, }, disk_farm_index, ); diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/main.rs b/crates/subspace-farmer/src/bin/subspace-farmer/main.rs index 09912f0b76..3d51fa6ee9 100644 --- a/crates/subspace-farmer/src/bin/subspace-farmer/main.rs +++ b/crates/subspace-farmer/src/bin/subspace-farmer/main.rs @@ -5,12 +5,11 @@ mod ss58; mod utils; use crate::utils::get_usable_plot_space; -use anyhow::Result; use bytesize::ByteSize; use clap::{Parser, ValueEnum, ValueHint}; use ss58::parse_ss58_reward_address; use std::fs; -use std::num::{NonZeroU16, NonZeroUsize}; +use std::num::{NonZeroU16, NonZeroU8, NonZeroUsize}; use std::path::PathBuf; use std::str::FromStr; use subspace_core_primitives::PublicKey; @@ -61,11 +60,24 @@ struct FarmingArgs { /// Number of plots that can be plotted concurrently, impacts RAM usage. #[arg(long, default_value = "10")] max_concurrent_plots: NonZeroUsize, + /// Percentage of plot dedicated for caching purposes, 99% max. + #[arg(long, default_value = "1", value_parser = cache_percentage_parser)] + cache_percentage: NonZeroU8, /// Do not print info about configured farms on startup. 
#[arg(long)] no_info: bool, } +fn cache_percentage_parser(s: &str) -> anyhow::Result<NonZeroU8> { + let cache_percentage = NonZeroU8::from_str(s)?; + + if cache_percentage.get() > 99 { + return Err(anyhow::anyhow!("Cache percentage can't exceed 99")); + } + + Ok(cache_percentage) +} + /// Arguments for DSN #[derive(Debug, Parser)] struct DsnArgs { @@ -140,7 +152,7 @@ struct DiskFarm { impl FromStr for DiskFarm { type Err = String; - fn from_str(s: &str) -> Result<Self, Self::Err> { + fn from_str(s: &str) -> anyhow::Result<Self, Self::Err> { let parts = s.split(',').collect::<Vec<&str>>(); if parts.len() != 2 { return Err("Must contain 2 coma-separated components".to_string()); @@ -227,7 +239,7 @@ struct Command { } #[tokio::main] -async fn main() -> Result<()> { +async fn main() -> anyhow::Result<()> { tracing_subscriber::registry() .with( fmt::layer().with_filter( diff --git a/crates/subspace-farmer/src/single_disk_plot.rs b/crates/subspace-farmer/src/single_disk_plot.rs index 94f8fb3ee1..1d2bcc9bdd 100644 --- a/crates/subspace-farmer/src/single_disk_plot.rs +++ b/crates/subspace-farmer/src/single_disk_plot.rs @@ -28,7 +28,7 @@ use static_assertions::const_assert; use std::fs::OpenOptions; use std::future::Future; use std::io::{Seek, SeekFrom}; -use std::num::NonZeroU16; +use std::num::{NonZeroU16, NonZeroU8}; use std::path::{Path, PathBuf}; use std::pin::Pin; use std::sync::Arc; @@ -55,9 +55,6 @@ const_assert!(std::mem::size_of::<usize>() >= std::mem::size_of::<u64>()); /// Reserve 1M of space for plot metadata (for potential future expansion) const RESERVED_PLOT_METADATA: u64 = 1024 * 1024; -/// Piece cache occupies 1% of allocated space -// TODO: Allow increasing this with CLI parameter -const PIECE_CACHE_FRACTION: (usize, usize) = (1, 100); /// Semaphore that limits disk access concurrency in strategic places to the number specified during /// initialization @@ -269,6 +266,8 @@ pub struct SingleDiskPlotOptions { pub kzg: Kzg, /// Erasure coding instance to use.
pub erasure_coding: ErasureCoding, + /// Percentage of disk plot dedicated for caching purposes + pub cache_percentage: NonZeroU8, } /// Errors happening when trying to create/open single disk plot @@ -458,6 +457,7 @@ impl SingleDiskPlot { piece_getter, kzg, erasure_coding, + cache_percentage, } = options; fs::create_dir_all(&directory)?; @@ -546,7 +546,21 @@ impl SingleDiskPlot { let pieces_in_sector = single_disk_plot_info.pieces_in_sector(); let sector_size = sector_size(max_pieces_in_sector); let sector_metadata_size = SectorMetadata::encoded_size(); - let target_sector_count = single_disk_plot_info.allocated_space() / sector_size as u64; + // TODO: Split allocated space into more components once all components are deterministic + // to correctly size everything: + // * plot info + // * identity + // * metadata + // * plot + // * known_addresses + let cache_size = allocated_space / 100 * u64::from(cache_percentage.get()); + // We have a hardcoded value decreasing allocated space to account for things like cache, + // don't change plot size (yet) if cache is under 5% that is assumed to be accounted for, + // this will be removed once we have proper static allocation as described in TODO above + let cache_size_exceeding_5_percents = + allocated_space / 100 * u64::from(cache_percentage.get()).saturating_sub(5); + let plot_space = (allocated_space - cache_size_exceeding_5_percents) / sector_size as u64; + let target_sector_count = plot_space; let target_sector_count = match SectorIndex::try_from(target_sector_count) { Ok(target_sector_count) if target_sector_count < SectorIndex::MAX => { target_sector_count @@ -648,8 +662,7 @@ impl SingleDiskPlot { let piece_cache = DiskPieceCache::open( &directory, - allocated_space as usize / PIECE_CACHE_FRACTION.1 * PIECE_CACHE_FRACTION.0 - / DiskPieceCache::element_size(), + cache_size as usize / DiskPieceCache::element_size(), )?; let (error_sender, error_receiver) = oneshot::channel(); From 
1371b9574115dbb3fae3d664f1a0b5cab01e996d Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Sun, 6 Aug 2023 04:26:07 +0300 Subject: [PATCH 4/5] Disable incremental archiving during major sync --- crates/pallet-subspace/src/mock.rs | 2 +- crates/sc-consensus-subspace/src/archiver.rs | 16 ++++++--- crates/sp-lightclient/src/tests.rs | 2 +- .../subspace-archiving/benches/archiving.rs | 33 +++++++++++++---- crates/subspace-archiving/src/archiver.rs | 12 ++++--- .../tests/integration/archiver.rs | 35 +++++++++++++------ .../tests/integration/piece_reconstruction.rs | 8 ++--- .../tests/integration/reconstructor.rs | 16 ++++----- .../benches/auditing.rs | 1 + .../benches/plotting.rs | 1 + .../benches/proving.rs | 1 + .../benches/reading.rs | 1 + crates/subspace-service/src/lib.rs | 29 +++++++-------- test/subspace-test-client/src/lib.rs | 2 +- 14 files changed, 105 insertions(+), 54 deletions(-) diff --git a/crates/pallet-subspace/src/mock.rs b/crates/pallet-subspace/src/mock.rs index 5b65c24f2b..05bd60ce60 100644 --- a/crates/pallet-subspace/src/mock.rs +++ b/crates/pallet-subspace/src/mock.rs @@ -378,7 +378,7 @@ pub fn create_archived_segment(kzg: Kzg) -> NewArchivedSegment { let mut block = vec![0u8; RecordedHistorySegment::SIZE]; rand::thread_rng().fill(block.as_mut_slice()); archiver - .add_block(block, Default::default()) + .add_block(block, Default::default(), true) .into_iter() .next() .unwrap() diff --git a/crates/sc-consensus-subspace/src/archiver.rs b/crates/sc-consensus-subspace/src/archiver.rs index 929c300055..5378e25154 100644 --- a/crates/sc-consensus-subspace/src/archiver.rs +++ b/crates/sc-consensus-subspace/src/archiver.rs @@ -16,7 +16,7 @@ use crate::{ get_chain_constants, ArchivedSegmentNotification, BlockImportingNotification, SubspaceLink, - SubspaceNotificationSender, + SubspaceNotificationSender, SubspaceSyncOracle, }; use codec::{Decode, Encode}; use futures::StreamExt; @@ -29,6 +29,7 @@ use sc_telemetry::{telemetry, TelemetryHandle, 
CONSENSUS_INFO}; use sc_utils::mpsc::tracing_unbounded; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; +use sp_consensus::SyncOracle; use sp_consensus_subspace::{FarmerPublicKey, SubspaceApi}; use sp_objects::ObjectsApi; use sp_runtime::generic::SignedBlock; @@ -500,7 +501,8 @@ where encoded_block.len() as f32 / 1024.0 ); - let archived_segments = archiver.add_block(encoded_block, block_object_mappings); + let archived_segments = + archiver.add_block(encoded_block, block_object_mappings, false); let new_segment_headers: Vec = archived_segments .iter() .map(|archived_segment| archived_segment.segment_header) @@ -584,10 +586,11 @@ fn finalize_block( /// `store_segment_header` extrinsic). /// /// NOTE: Archiver is doing blocking operations and must run in a dedicated task. -pub fn create_subspace_archiver( +pub fn create_subspace_archiver( segment_headers_store: SegmentHeadersStore, subspace_link: &SubspaceLink, client: Arc, + sync_oracle: SubspaceSyncOracle, telemetry: Option, ) -> impl Future + Send + 'static where @@ -604,6 +607,7 @@ where + 'static, Client::Api: SubspaceApi + ObjectsApi, AS: AuxStore + Send + Sync + 'static, + SO: SyncOracle + Send + Sync + 'static, { let client_info = client.info(); let best_block_hash = client_info.best_hash; @@ -725,7 +729,11 @@ where ); let mut new_segment_headers = Vec::new(); - for archived_segment in archiver.add_block(encoded_block, block_object_mappings) { + for archived_segment in archiver.add_block( + encoded_block, + block_object_mappings, + !sync_oracle.is_major_syncing(), + ) { let segment_header = archived_segment.segment_header; if let Err(error) = diff --git a/crates/sp-lightclient/src/tests.rs b/crates/sp-lightclient/src/tests.rs index 4683388b97..3ec6c9a658 100644 --- a/crates/sp-lightclient/src/tests.rs +++ b/crates/sp-lightclient/src/tests.rs @@ -74,7 +74,7 @@ fn archived_segment(kzg: Kzg) -> NewArchivedSegment { let mut archiver = Archiver::new(kzg).unwrap(); archiver - .add_block(block, 
Default::default()) + .add_block(block, Default::default(), true) .into_iter() .next() .unwrap() diff --git a/crates/subspace-archiving/benches/archiving.rs b/crates/subspace-archiving/benches/archiving.rs index bd16eeb0ea..0cd77d8c7d 100644 --- a/crates/subspace-archiving/benches/archiving.rs +++ b/crates/subspace-archiving/benches/archiving.rs @@ -1,10 +1,10 @@ -use criterion::{criterion_group, criterion_main, Criterion}; +use criterion::{black_box, criterion_group, criterion_main, Criterion}; use rand::{thread_rng, Rng}; use subspace_archiving::archiver::Archiver; use subspace_core_primitives::crypto::kzg; use subspace_core_primitives::crypto::kzg::Kzg; -const AMOUNT_OF_DATA: usize = 1024 * 1024; +const AMOUNT_OF_DATA: usize = 5 * 1024 * 1024; const SMALL_BLOCK_SIZE: usize = 500; fn criterion_benchmark(c: &mut Criterion) { @@ -15,17 +15,36 @@ fn criterion_benchmark(c: &mut Criterion) { c.bench_function("segment-archiving-large-block", |b| { b.iter(|| { - archiver - .clone() - .add_block(input.clone(), Default::default()); + archiver.clone().add_block( + black_box(input.clone()), + black_box(Default::default()), + black_box(true), + ); }) }); - c.bench_function("segment-archiving-small-blocks", |b| { + c.bench_function("segment-archiving-small-blocks/incremental", |b| { b.iter(|| { let mut archiver = archiver.clone(); for chunk in input.chunks(SMALL_BLOCK_SIZE) { - archiver.add_block(chunk.to_vec(), Default::default()); + archiver.add_block( + black_box(chunk.to_vec()), + black_box(Default::default()), + black_box(true), + ); + } + }) + }); + + c.bench_function("segment-archiving-small-blocks/non-incremental", |b| { + b.iter(|| { + let mut archiver = archiver.clone(); + for chunk in input.chunks(SMALL_BLOCK_SIZE) { + archiver.add_block( + black_box(chunk.to_vec()), + black_box(Default::default()), + black_box(false), + ); } }) }); diff --git a/crates/subspace-archiving/src/archiver.rs b/crates/subspace-archiving/src/archiver.rs index 517b47cfce..b09f0f9bae 100644 
--- a/crates/subspace-archiving/src/archiver.rs +++ b/crates/subspace-archiving/src/archiver.rs @@ -341,11 +341,15 @@ impl Archiver { } } - /// Adds new block to internal buffer, potentially producing pieces and segment header headers + /// Adds new block to internal buffer, potentially producing pieces and segment header headers. + /// + /// Incremental archiving can be enabled if amortized block addition cost is preferred over + /// throughput. pub fn add_block( &mut self, bytes: Vec, object_mapping: BlockObjectMapping, + incremental: bool, ) -> Vec { // Append new block to the buffer self.buffer.push_back(SegmentItem::Block { @@ -355,7 +359,7 @@ impl Archiver { let mut archived_segments = Vec::new(); - while let Some(segment) = self.produce_segment() { + while let Some(segment) = self.produce_segment(incremental) { archived_segments.push(self.produce_archived_segment(segment)); } @@ -364,7 +368,7 @@ impl Archiver { /// Try to slice buffer contents into segments if there is enough data, producing one segment at /// a time - fn produce_segment(&mut self) -> Option { + fn produce_segment(&mut self, incremental: bool) -> Option { let mut segment = Segment::V0 { items: Vec::with_capacity(self.buffer.len()), }; @@ -383,7 +387,7 @@ impl Archiver { let bytes_committed_to = existing_commitments * RawRecord::SIZE; // Run incremental archiver only when there is at least two records to archive, // otherwise we're wasting CPU cycles encoding segment over and over again - if segment_size - bytes_committed_to >= RawRecord::SIZE * 2 { + if incremental && segment_size - bytes_committed_to >= RawRecord::SIZE * 2 { update_record_commitments( &mut self.incremental_record_commitments, &segment, diff --git a/crates/subspace-archiving/tests/integration/archiver.rs b/crates/subspace-archiving/tests/integration/archiver.rs index f67bd8c4e5..025f789dbf 100644 --- a/crates/subspace-archiving/tests/integration/archiver.rs +++ b/crates/subspace-archiving/tests/integration/archiver.rs @@ 
-95,7 +95,7 @@ fn archiver() { }; // There is not enough data to produce archived segment yet assert!(archiver - .add_block(block_0.clone(), block_0_object_mapping.clone()) + .add_block(block_0.clone(), block_0_object_mapping.clone(), true) .is_empty()); let (block_1, block_1_object_mapping) = { @@ -133,7 +133,8 @@ fn archiver() { (block, object_mapping) }; // This should produce 1 archived segment - let archived_segments = archiver.add_block(block_1.clone(), block_1_object_mapping.clone()); + let archived_segments = + archiver.add_block(block_1.clone(), block_1_object_mapping.clone(), true); assert_eq!(archived_segments.len(), 1); let first_archived_segment = archived_segments.into_iter().next().unwrap(); @@ -210,7 +211,8 @@ fn archiver() { block }; // This should be big enough to produce two archived segments in one go - let archived_segments = archiver.add_block(block_2.clone(), BlockObjectMapping::default()); + let archived_segments = + archiver.add_block(block_2.clone(), BlockObjectMapping::default(), true); assert_eq!(archived_segments.len(), 2); // Check that initializing archiver with initial state before last block results in the same @@ -225,7 +227,11 @@ fn archiver() { .unwrap(); assert_eq!( - archiver_with_initial_state.add_block(block_2.clone(), BlockObjectMapping::default()), + archiver_with_initial_state.add_block( + block_2.clone(), + BlockObjectMapping::default(), + true + ), archived_segments, ); } @@ -331,7 +337,8 @@ fn archiver() { thread_rng().fill(block.as_mut_slice()); block }; - let archived_segments = archiver.add_block(block_3.clone(), BlockObjectMapping::default()); + let archived_segments = + archiver.add_block(block_3.clone(), BlockObjectMapping::default(), true); assert_eq!(archived_segments.len(), 1); // Check that initializing archiver with initial state before last block results in the same @@ -346,7 +353,7 @@ fn archiver() { .unwrap(); assert_eq!( - archiver_with_initial_state.add_block(block_3, BlockObjectMapping::default()), + 
archiver_with_initial_state.add_block(block_3, BlockObjectMapping::default(), true), archived_segments, ); } @@ -466,7 +473,7 @@ fn one_byte_smaller_segment() { assert_eq!( Archiver::new(kzg.clone()) .unwrap() - .add_block(vec![0u8; block_size], BlockObjectMapping::default()) + .add_block(vec![0u8; block_size], BlockObjectMapping::default(), true) .len(), 1 ); @@ -474,7 +481,11 @@ fn one_byte_smaller_segment() { // against code regressions assert!(Archiver::new(kzg) .unwrap() - .add_block(vec![0u8; block_size - 1], BlockObjectMapping::default()) + .add_block( + vec![0u8; block_size - 1], + BlockObjectMapping::default(), + true + ) .is_empty()); } @@ -498,7 +509,7 @@ fn spill_over_edge_case() { // We leave three bytes at the end intentionally - 3; assert!(archiver - .add_block(vec![0u8; block_size], BlockObjectMapping::default()) + .add_block(vec![0u8; block_size], BlockObjectMapping::default(), true) .is_empty()); // Here we add one more block with internal length that takes 4 bytes in compact length @@ -513,6 +524,7 @@ fn spill_over_edge_case() { offset: 0, }], }, + true, ); assert_eq!(archived_segments.len(), 2); // If spill over actually happened, we'll not find object mapping in the first segment @@ -539,7 +551,8 @@ fn object_on_the_edge_of_segment() { let kzg = Kzg::new(embedded_kzg_settings()); let mut archiver = Archiver::new(kzg).unwrap(); let first_block = vec![0u8; RecordedHistorySegment::SIZE]; - let archived_segments = archiver.add_block(first_block.clone(), BlockObjectMapping::default()); + let archived_segments = + archiver.add_block(first_block.clone(), BlockObjectMapping::default(), true); assert_eq!(archived_segments.len(), 1); let archived_segment = archived_segments.into_iter().next().unwrap(); let left_unarchived_from_first_block = first_block.len() as u32 @@ -600,6 +613,7 @@ fn object_on_the_edge_of_segment() { offset: object_mapping.offset() - 1, }], }, + true, ); assert_eq!(archived_segments.len(), 2); @@ -618,6 +632,7 @@ fn 
object_on_the_edge_of_segment() { BlockObjectMapping { objects: vec![object_mapping], }, + true, ); assert_eq!(archived_segments.len(), 2); diff --git a/crates/subspace-archiving/tests/integration/piece_reconstruction.rs b/crates/subspace-archiving/tests/integration/piece_reconstruction.rs index 07dd21de78..c1426f035b 100644 --- a/crates/subspace-archiving/tests/integration/piece_reconstruction.rs +++ b/crates/subspace-archiving/tests/integration/piece_reconstruction.rs @@ -25,7 +25,7 @@ fn segment_reconstruction_works() { let block = get_random_block(); - let archived_segments = archiver.add_block(block, BlockObjectMapping::default()); + let archived_segments = archiver.add_block(block, BlockObjectMapping::default(), true); assert_eq!(archived_segments.len(), 1); @@ -66,7 +66,7 @@ fn piece_reconstruction_works() { // Block that fits into the segment fully let block = get_random_block(); - let archived_segments = archiver.add_block(block, BlockObjectMapping::default()); + let archived_segments = archiver.add_block(block, BlockObjectMapping::default(), true); assert_eq!(archived_segments.len(), 1); @@ -126,7 +126,7 @@ fn segment_reconstruction_fails() { // Block that fits into the segment fully let block = get_random_block(); - let archived_segments = archiver.add_block(block, BlockObjectMapping::default()); + let archived_segments = archiver.add_block(block, BlockObjectMapping::default(), true); assert_eq!(archived_segments.len(), 1); @@ -163,7 +163,7 @@ fn piece_reconstruction_fails() { // Block that fits into the segment fully let block = get_random_block(); - let archived_segments = archiver.add_block(block, BlockObjectMapping::default()); + let archived_segments = archiver.add_block(block, BlockObjectMapping::default(), true); assert_eq!(archived_segments.len(), 1); diff --git a/crates/subspace-archiving/tests/integration/reconstructor.rs b/crates/subspace-archiving/tests/integration/reconstructor.rs index 06b551a4a4..bd4e9860fd 100644 --- 
a/crates/subspace-archiving/tests/integration/reconstructor.rs +++ b/crates/subspace-archiving/tests/integration/reconstructor.rs @@ -49,12 +49,12 @@ fn basic() { block }; let archived_segments = archiver - .add_block(block_0.clone(), BlockObjectMapping::default()) + .add_block(block_0.clone(), BlockObjectMapping::default(), true) .into_iter() - .chain(archiver.add_block(block_1.clone(), BlockObjectMapping::default())) - .chain(archiver.add_block(block_2.clone(), BlockObjectMapping::default())) - .chain(archiver.add_block(block_3.clone(), BlockObjectMapping::default())) - .chain(archiver.add_block(block_4, BlockObjectMapping::default())) + .chain(archiver.add_block(block_1.clone(), BlockObjectMapping::default(), true)) + .chain(archiver.add_block(block_2.clone(), BlockObjectMapping::default(), true)) + .chain(archiver.add_block(block_3.clone(), BlockObjectMapping::default(), true)) + .chain(archiver.add_block(block_4, BlockObjectMapping::default(), true)) .collect::>(); assert_eq!(archived_segments.len(), 5); @@ -257,9 +257,9 @@ fn partial_data() { block }; let archived_segments = archiver - .add_block(block_0.clone(), BlockObjectMapping::default()) + .add_block(block_0.clone(), BlockObjectMapping::default(), true) .into_iter() - .chain(archiver.add_block(block_1, BlockObjectMapping::default())) + .chain(archiver.add_block(block_1, BlockObjectMapping::default(), true)) .collect::>(); assert_eq!(archived_segments.len(), 1); @@ -332,7 +332,7 @@ fn invalid_usage() { block }; - let archived_segments = archiver.add_block(block_0, BlockObjectMapping::default()); + let archived_segments = archiver.add_block(block_0, BlockObjectMapping::default(), true); assert_eq!(archived_segments.len(), 4); diff --git a/crates/subspace-farmer-components/benches/auditing.rs b/crates/subspace-farmer-components/benches/auditing.rs index 5565ec6813..9e98eaff01 100644 --- a/crates/subspace-farmer-components/benches/auditing.rs +++ b/crates/subspace-farmer-components/benches/auditing.rs @@ 
-57,6 +57,7 @@ pub fn criterion_benchmark(c: &mut Criterion) { .add_block( AsRef::<[u8]>::as_ref(input.as_ref()).to_vec(), Default::default(), + true, ) .into_iter() .next() diff --git a/crates/subspace-farmer-components/benches/plotting.rs b/crates/subspace-farmer-components/benches/plotting.rs index f15369d29f..c442206ebe 100644 --- a/crates/subspace-farmer-components/benches/plotting.rs +++ b/crates/subspace-farmer-components/benches/plotting.rs @@ -39,6 +39,7 @@ fn criterion_benchmark(c: &mut Criterion) { .add_block( AsRef::<[u8]>::as_ref(input.as_ref()).to_vec(), Default::default(), + true, ) .into_iter() .next() diff --git a/crates/subspace-farmer-components/benches/proving.rs b/crates/subspace-farmer-components/benches/proving.rs index eaf3411224..d844152061 100644 --- a/crates/subspace-farmer-components/benches/proving.rs +++ b/crates/subspace-farmer-components/benches/proving.rs @@ -59,6 +59,7 @@ pub fn criterion_benchmark(c: &mut Criterion) { .add_block( AsRef::<[u8]>::as_ref(input.as_ref()).to_vec(), Default::default(), + true, ) .into_iter() .next() diff --git a/crates/subspace-farmer-components/benches/reading.rs b/crates/subspace-farmer-components/benches/reading.rs index 5540ac9ee3..774f5bcf50 100644 --- a/crates/subspace-farmer-components/benches/reading.rs +++ b/crates/subspace-farmer-components/benches/reading.rs @@ -56,6 +56,7 @@ pub fn criterion_benchmark(c: &mut Criterion) { .add_block( AsRef::<[u8]>::as_ref(input.as_ref()).to_vec(), Default::default(), + true, ) .into_iter() .next() diff --git a/crates/subspace-service/src/lib.rs b/crates/subspace-service/src/lib.rs index 38c09822f6..ebbaa6b843 100644 --- a/crates/subspace-service/src/lib.rs +++ b/crates/subspace-service/src/lib.rs @@ -695,17 +695,6 @@ where } }; - let subspace_archiver = sc_consensus_subspace::create_subspace_archiver( - segment_headers_store.clone(), - &subspace_link, - client.clone(), - telemetry.as_ref().map(|telemetry| telemetry.handle()), - ); - - task_manager - 
.spawn_essential_handle() - .spawn_essential_blocking("subspace-archiver", None, Box::pin(subspace_archiver)); - // TODO: This prevents SIGINT from working properly if config.sync_from_dsn { info!("⚙️ Starting initial sync from DSN, this might take some time"); @@ -776,6 +765,21 @@ where block_relay, })?; + let subspace_sync_oracle = + SubspaceSyncOracle::new(config.force_authoring, sync_service.clone()); + + let subspace_archiver = sc_consensus_subspace::create_subspace_archiver( + segment_headers_store.clone(), + &subspace_link, + client.clone(), + subspace_sync_oracle.clone(), + telemetry.as_ref().map(|telemetry| telemetry.handle()), + ); + + task_manager + .spawn_essential_handle() + .spawn_essential_blocking("subspace-archiver", None, Box::pin(subspace_archiver)); + if config.enable_subspace_block_relay { network_wrapper.set(network_service.clone()); } @@ -841,9 +845,6 @@ where let block_importing_notification_stream = subspace_link.block_importing_notification_stream(); let archived_segment_notification_stream = subspace_link.archived_segment_notification_stream(); - let subspace_sync_oracle = - SubspaceSyncOracle::new(config.force_authoring, sync_service.clone()); - if config.role.is_authority() || config.force_new_slot_notifications { let proposer_factory = ProposerFactory::new( task_manager.spawn_handle(), diff --git a/test/subspace-test-client/src/lib.rs b/test/subspace-test-client/src/lib.rs index 4b5e0f26da..dda0d75b70 100644 --- a/test/subspace-test-client/src/lib.rs +++ b/test/subspace-test-client/src/lib.rs @@ -228,7 +228,7 @@ where let genesis_block = client.block(client.info().genesis_hash).unwrap().unwrap(); let archived_segment = archiver - .add_block(genesis_block.encode(), BlockObjectMapping::default()) + .add_block(genesis_block.encode(), BlockObjectMapping::default(), true) .into_iter() .next() .expect("First block is always producing one segment; qed"); From f43bf8fe9bc97fc9314886151fad332b45f276f6 Mon Sep 17 00:00:00 2001 From: vedhavyas 
Date: Mon, 7 Aug 2023 10:21:36 +0200 Subject: [PATCH 5/5] enable ETH rpc --- domains/service/src/domain.rs | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/domains/service/src/domain.rs b/domains/service/src/domain.rs index 243d08b9f7..4c02612aad 100644 --- a/domains/service/src/domain.rs +++ b/domains/service/src/domain.rs @@ -341,7 +341,7 @@ where { let DomainParams { domain_id, - domain_config, + mut domain_config, domain_created_at, consensus_client, consensus_network_sync_oracle, @@ -390,6 +390,7 @@ where })?; let is_authority = domain_config.service_config.role.is_authority(); + domain_config.service_config.rpc_id_provider = provider.rpc_id(); let rpc_builder = { let deps = crate::rpc::FullDeps { client: client.clone(), @@ -406,7 +407,18 @@ where backend: backend.clone(), }; - Box::new(move |_, _| crate::rpc::create_full(deps.clone()).map_err(Into::into)) + let spawn_essential = task_manager.spawn_essential_handle(); + let rpc_deps = provider.deps(deps)?; + Box::new(move |_, subscription_task_executor| { + let spawn_essential = spawn_essential.clone(); + provider + .rpc_builder( + rpc_deps.clone(), + subscription_task_executor, + spawn_essential, + ) + .map_err(Into::into) + }) }; let rpc_handlers = sc_service::spawn_tasks(SpawnTasksParams {