diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index dc0863cfa0b3..6bc13769b5a2 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -48,7 +48,7 @@ use polkadot_node_subsystem_util::{ request_validators, Validator, }; use polkadot_primitives::{ - BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CollatorId, + BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, CoreState, Hash, Id as ParaId, PvfExecTimeoutKind, SigningContext, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, }; @@ -354,7 +354,7 @@ async fn handle_active_leaves_update( let group_index = group_rotation_info.group_for_core(core_index, n_cores); if let Some(g) = validator_groups.get(group_index.0 as usize) { if validator.as_ref().map_or(false, |v| g.contains(&v.index())) { - assignment = Some((scheduled.para_id, scheduled.collator)); + assignment = Some(scheduled.para_id); } groups.insert(scheduled.para_id, g.clone()); } @@ -363,15 +363,15 @@ async fn handle_active_leaves_update( let table_context = TableContext { groups, validators, validator }; - let (assignment, required_collator) = match assignment { + let assignment = match assignment { None => { assignments_span.add_string_tag("assigned", "false"); - (None, None) + None }, - Some((assignment, required_collator)) => { + Some(assignment) => { assignments_span.add_string_tag("assigned", "true"); assignments_span.add_para_id(assignment); - (Some(assignment), required_collator) + Some(assignment) }, }; @@ -381,7 +381,6 @@ async fn handle_active_leaves_update( let job = CandidateBackingJob { parent, assignment, - required_collator, issued_statements: HashSet::new(), awaiting_validation: HashSet::new(), fallbacks: HashMap::new(), @@ -412,8 +411,6 @@ struct CandidateBackingJob { parent: Hash, /// The `ParaId` assigned to this validator assignment: Option, - /// The collator required to author the 
candidate, if any. - required_collator: Option, /// Spans for all candidates that are not yet backable. unbacked_candidates: HashMap, /// We issued `Seconded`, `Valid` or `Invalid` statements on about these candidates. @@ -911,21 +908,6 @@ impl CandidateBackingJob { candidate: &CandidateReceipt, pov: Arc, ) -> Result<(), Error> { - // Check that candidate is collated by the right collator. - if self - .required_collator - .as_ref() - .map_or(false, |c| c != &candidate.descriptor().collator) - { - // Break cycle - bounded as there is only one candidate to - // second per block. - ctx.send_unbounded_message(CollatorProtocolMessage::Invalid( - self.parent, - candidate.clone(), - )); - return Ok(()) - } - let candidate_hash = candidate.hash(); let mut span = self.get_unbacked_validation_child( root_span, @@ -1169,8 +1151,6 @@ impl CandidateBackingJob { return Ok(()) } - let descriptor = attesting.candidate.descriptor().clone(); - gum::debug!( target: LOG_TARGET, candidate_hash = ?candidate_hash, @@ -1178,16 +1158,6 @@ impl CandidateBackingJob { "Kicking off validation", ); - // Check that candidate is collated by the right collator. - if self.required_collator.as_ref().map_or(false, |c| c != &descriptor.collator) { - // If not, we've got the statement in the table but we will - // not issue validation work for it. - // - // Act as though we've issued a statement. 
- self.issued_statements.insert(candidate_hash); - return Ok(()) - } - let bg_sender = ctx.sender().clone(); let pov = PoVData::FetchFromValidator { from_validator: attesting.from_validator, diff --git a/node/core/backing/src/tests.rs b/node/core/backing/src/tests.rs index 35c83297fa71..8f1f0791ea00 100644 --- a/node/core/backing/src/tests.rs +++ b/node/core/backing/src/tests.rs @@ -31,8 +31,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - CandidateDescriptor, CollatorId, GroupRotationInfo, HeadData, PersistedValidationData, - PvfExecTimeoutKind, ScheduledCore, + CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, PvfExecTimeoutKind, + ScheduledCore, }; use sp_application_crypto::AppCrypto; use sp_keyring::Sr25519Keyring; @@ -97,14 +97,10 @@ impl Default for TestState { let group_rotation_info = GroupRotationInfo { session_start_block: 0, group_rotation_frequency: 100, now: 1 }; - let thread_collator: CollatorId = Sr25519Keyring::Two.public().into(); let availability_cores = vec![ CoreState::Scheduled(ScheduledCore { para_id: chain_a, collator: None }), CoreState::Scheduled(ScheduledCore { para_id: chain_b, collator: None }), - CoreState::Scheduled(ScheduledCore { - para_id: thread_a, - collator: Some(thread_collator.clone()), - }), + CoreState::Scheduled(ScheduledCore { para_id: thread_a, collator: None }), ]; let mut head_data = HashMap::new(); @@ -1190,66 +1186,11 @@ fn backing_works_after_failed_validation() { }); } -// Test that a `CandidateBackingMessage::Second` issues validation work -// and in case validation is successful issues a `StatementDistributionMessage`. 
-#[test] -fn backing_doesnt_second_wrong_collator() { - let mut test_state = TestState::default(); - test_state.availability_cores[0] = CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(1), - collator: Some(Sr25519Keyring::Bob.public().into()), - }); - - test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { - test_startup(&mut virtual_overseer, &test_state).await; - - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - - let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); - - let pov_hash = pov.hash(); - let candidate = TestCandidateBuilder { - para_id: test_state.chain_ids[0], - relay_parent: test_state.relay_parent, - pov_hash, - head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), - ..Default::default() - } - .build(); - - let second = CandidateBackingMessage::Second( - test_state.relay_parent, - candidate.to_plain(), - pov.clone(), - ); - - virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; - - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CollatorProtocol( - CollatorProtocolMessage::Invalid(parent, c) - ) if parent == test_state.relay_parent && c == candidate.to_plain() => { - } - ); - - virtual_overseer - .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( - ActiveLeavesUpdate::stop_work(test_state.relay_parent), - ))) - .await; - virtual_overseer - }); -} - #[test] fn validation_work_ignores_wrong_collator() { let mut test_state = TestState::default(); - test_state.availability_cores[0] = CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(1), - collator: Some(Sr25519Keyring::Bob.public().into()), - }); + test_state.availability_cores[0] = + CoreState::Scheduled(ScheduledCore { para_id: ParaId::from(1), collator: None }); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; diff --git 
a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index b455285332be..f87a14971e8a 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -921,6 +921,7 @@ async fn process_incoming_peer_message( .span_per_relay_parent .get(&relay_parent) .map(|s| s.child("advertise-collation")); + if !state.view.contains(&relay_parent) { gum::debug!( target: LOG_TARGET, diff --git a/node/service/src/chain_spec.rs b/node/service/src/chain_spec.rs index a9e6b45f3b2d..4055ca2ce0d6 100644 --- a/node/service/src/chain_spec.rs +++ b/node/service/src/chain_spec.rs @@ -211,8 +211,7 @@ fn default_parachains_host_configuration( max_pov_size: MAX_POV_SIZE, max_head_data_size: 32 * 1024, group_rotation_frequency: 20, - chain_availability_period: 4, - thread_availability_period: 4, + paras_availability_period: 4, max_upward_queue_count: 8, max_upward_queue_size: 1024 * 1024, max_downward_message_size: 1024 * 1024, @@ -223,10 +222,8 @@ fn default_parachains_host_configuration( hrmp_channel_max_capacity: 8, hrmp_channel_max_total_size: 8 * 1024, hrmp_max_parachain_inbound_channels: 4, - hrmp_max_parathread_inbound_channels: 4, hrmp_channel_max_message_size: 1024 * 1024, hrmp_max_parachain_outbound_channels: 4, - hrmp_max_parathread_outbound_channels: 4, hrmp_max_message_num_per_candidate: 5, dispute_period: 6, no_show_slots: 2, diff --git a/node/test/service/src/chain_spec.rs b/node/test/service/src/chain_spec.rs index 876bbb8806b4..9aadd7d203c0 100644 --- a/node/test/service/src/chain_spec.rs +++ b/node/test/service/src/chain_spec.rs @@ -175,8 +175,7 @@ fn polkadot_testnet_genesis( max_pov_size: MAX_POV_SIZE, max_head_data_size: 32 * 1024, group_rotation_frequency: 20, - chain_availability_period: 4, - thread_availability_period: 4, + paras_availability_period: 4, no_show_slots: 10, minimum_validation_upgrade_delay: 5, ..Default::default() diff 
--git a/primitives/src/lib.rs b/primitives/src/lib.rs index 1c8ef1eae73b..3680cb857e66 100644 --- a/primitives/src/lib.rs +++ b/primitives/src/lib.rs @@ -56,7 +56,8 @@ pub use v5::{ UpgradeRestriction, UpwardMessage, ValidDisputeStatementKind, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, ValidityError, ASSIGNMENT_KEY_TYPE_ID, LOWEST_PUBLIC_ID, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, - MAX_POV_SIZE, PARACHAINS_INHERENT_IDENTIFIER, PARACHAIN_KEY_TYPE_ID, + MAX_POV_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, PARACHAINS_INHERENT_IDENTIFIER, + PARACHAIN_KEY_TYPE_ID, }; #[cfg(feature = "std")] diff --git a/primitives/src/v5/mod.rs b/primitives/src/v5/mod.rs index 400bf2224d46..a4a06d22435e 100644 --- a/primitives/src/v5/mod.rs +++ b/primitives/src/v5/mod.rs @@ -384,6 +384,11 @@ pub const MAX_HEAD_DATA_SIZE: u32 = 1 * 1024 * 1024; // NOTE: This value is used in the runtime so be careful when changing it. pub const MAX_POV_SIZE: u32 = 5 * 1024 * 1024; +/// Default queue size we use for the on-demand order book. +/// +/// Can be adjusted in configuration. +pub const ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE: u32 = 10_000; + // The public key of a keypair used by a validator for determining assignments /// to approve included parachain candidates. mod assignment_app { @@ -812,29 +817,63 @@ pub struct ParathreadClaim(pub Id, pub Option); pub struct ParathreadEntry { /// The claim. pub claim: ParathreadClaim, - /// Number of retries. + /// Number of retries pub retries: u32, } +/// An assignment for a parachain scheduled to be backed and included in a relay chain block. +#[derive(Clone, Encode, Decode, PartialEq, TypeInfo, RuntimeDebug)] +pub struct Assignment { + /// Assignment's ParaId + pub para_id: Id, +} + +impl Assignment { + /// Create a new `Assignment`. 
+ pub fn new(para_id: Id) -> Self { + Self { para_id } + } +} + +/// An entry tracking a paras +#[derive(Clone, Encode, Decode, TypeInfo, PartialEq, RuntimeDebug)] +pub struct ParasEntry { + /// The `Assignment` + pub assignment: Assignment, + /// The number of times the entry has timed out in availability. + pub availability_timeouts: u32, + /// The block height where this entry becomes invalid. + pub ttl: N, +} + +impl ParasEntry { + /// Return `Id` from the underlying `Assignment`. + pub fn para_id(&self) -> Id { + self.assignment.para_id + } + + /// Create a new `ParasEntry`. + pub fn new(assignment: Assignment, now: N) -> Self { + ParasEntry { assignment, availability_timeouts: 0, ttl: now } + } +} + /// What is occupying a specific availability core. #[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)] #[cfg_attr(feature = "std", derive(PartialEq))] -pub enum CoreOccupied { +pub enum CoreOccupied { /// The core is not occupied. Free, - /// A parathread. - Parathread(ParathreadEntry), - /// A parachain. - Parachain(Id), + /// A paras. + Paras(ParasEntry), } -impl CoreOccupied { +impl CoreOccupied { /// Is core free? pub fn is_free(&self) -> bool { match self { Self::Free => true, - Self::Parachain(_) => false, - Self::Parathread(_) => false, + Self::Paras(_) => false, } } } @@ -968,7 +1007,9 @@ impl OccupiedCore { pub struct ScheduledCore { /// The ID of a para scheduled. pub para_id: Id, - /// The collator required to author the block, if any. + /// DEPRECATED: see: https://github.com/paritytech/polkadot/issues/7575 + /// + /// Will be removed in a future version. 
pub collator: Option, } diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index f06a9d80549e..1006b67de733 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -39,6 +39,7 @@ use scale_info::TypeInfo; use sp_std::{cmp::Ordering, collections::btree_map::BTreeMap, prelude::*}; use runtime_parachains::{ + assigner_parachains as parachains_assigner_parachains, configuration as parachains_configuration, disputes as parachains_disputes, disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, @@ -1166,11 +1167,12 @@ impl parachains_paras_inherent::Config for Runtime { type WeightInfo = weights::runtime_parachains_paras_inherent::WeightInfo; } -impl runtime_parachains::scheduler_parachains::Config for Runtime {} impl parachains_scheduler::Config for Runtime { - type AssignmentProvider = runtime_parachains::scheduler_parachains::Pallet; + type AssignmentProvider = ParaAssignmentProvider; } +impl parachains_assigner_parachains::Config for Runtime {} + impl parachains_initializer::Config for Runtime { type Randomness = pallet_babe::RandomnessFromOneEpochAgo; type ForceOrigin = EnsureRoot; @@ -1473,6 +1475,7 @@ construct_runtime! { ParaSessionInfo: parachains_session_info::{Pallet, Storage} = 61, ParasDisputes: parachains_disputes::{Pallet, Call, Storage, Event} = 62, ParasSlashing: parachains_slashing::{Pallet, Call, Storage, ValidateUnsigned} = 63, + ParaAssignmentProvider: parachains_assigner_parachains::{Pallet, Storage} = 64, // Parachain Onboarding Pallets. Start indices at 70 to leave room. 
Registrar: paras_registrar::{Pallet, Call, Storage, Event} = 70, diff --git a/runtime/kusama/src/weights/runtime_parachains_configuration.rs b/runtime/kusama/src/weights/runtime_parachains_configuration.rs index 077e9409076d..1872c9b6d104 100644 --- a/runtime/kusama/src/weights/runtime_parachains_configuration.rs +++ b/runtime/kusama/src/weights/runtime_parachains_configuration.rs @@ -140,4 +140,6 @@ impl runtime_parachains::configuration::WeightInfo for .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } + + fn set_config_with_perbill() -> Weight { todo!() } } diff --git a/runtime/parachains/src/assigner.rs b/runtime/parachains/src/assigner.rs new file mode 100644 index 000000000000..79ecbf21528d --- /dev/null +++ b/runtime/parachains/src/assigner.rs @@ -0,0 +1,119 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The Polkadot multiplexing assignment provider. +//! Provides blockspace assignments for both bulk and on demand parachains. 
+use frame_system::pallet_prelude::BlockNumberFor; +use primitives::{v5::Assignment, CoreIndex, Id as ParaId}; + +use crate::{configuration, paras, scheduler_common::AssignmentProvider}; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + configuration::Config + paras::Config { + type ParachainsAssignmentProvider: AssignmentProvider>; + type OnDemandAssignmentProvider: AssignmentProvider>; + } +} + +// Aliases to make the impl more readable. +type ParachainAssigner = ::ParachainsAssignmentProvider; +type OnDemandAssigner = ::OnDemandAssignmentProvider; + +impl Pallet { + // Helper fn for the AssignmentProvider implementation. + // Assumes that the first allocation of cores is to bulk parachains. + // This function will return false if there are no cores assigned to the bulk parachain assigner. + fn is_bulk_core(core_idx: &CoreIndex) -> bool { + let parachain_cores = + as AssignmentProvider>>::session_core_count(); + (0..parachain_cores).contains(&core_idx.0) + } +} + +impl AssignmentProvider> for Pallet { + fn session_core_count() -> u32 { + let parachain_cores = + as AssignmentProvider>>::session_core_count(); + let on_demand_cores = + as AssignmentProvider>>::session_core_count(); + + parachain_cores.saturating_add(on_demand_cores) + } + + /// Pops an `Assignment` from a specified `CoreIndex` + fn pop_assignment_for_core( + core_idx: CoreIndex, + concluded_para: Option, + ) -> Option { + if Pallet::::is_bulk_core(&core_idx) { + as AssignmentProvider>>::pop_assignment_for_core( + core_idx, + concluded_para, + ) + } else { + as AssignmentProvider>>::pop_assignment_for_core( + core_idx, + concluded_para, + ) + } + } + + fn push_assignment_for_core(core_idx: CoreIndex, assignment: Assignment) { + if Pallet::::is_bulk_core(&core_idx) { + as AssignmentProvider>>::push_assignment_for_core( + 
core_idx, assignment, + ) + } else { + as AssignmentProvider>>::push_assignment_for_core( + core_idx, assignment, + ) + } + } + + fn get_availability_period(core_idx: CoreIndex) -> BlockNumberFor { + if Pallet::::is_bulk_core(&core_idx) { + as AssignmentProvider>>::get_availability_period( + core_idx, + ) + } else { + as AssignmentProvider>>::get_availability_period( + core_idx, + ) + } + } + + fn get_max_retries(core_idx: CoreIndex) -> u32 { + if Pallet::::is_bulk_core(&core_idx) { + as AssignmentProvider>>::get_max_retries( + core_idx, + ) + } else { + as AssignmentProvider>>::get_max_retries( + core_idx, + ) + } + } +} diff --git a/runtime/parachains/src/assigner_on_demand/benchmarking.rs b/runtime/parachains/src/assigner_on_demand/benchmarking.rs new file mode 100644 index 000000000000..98260bc2f499 --- /dev/null +++ b/runtime/parachains/src/assigner_on_demand/benchmarking.rs @@ -0,0 +1,113 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! On demand assigner pallet benchmarking. 
+ +#![cfg(feature = "runtime-benchmarks")] + +use super::{Pallet, *}; +use crate::{ + configuration::{HostConfiguration, Pallet as ConfigurationPallet}, + paras::{Pallet as ParasPallet, ParaGenesisArgs, ParaKind, ParachainsCache}, + shared::Pallet as ParasShared, +}; + +use frame_benchmarking::v2::*; +use frame_system::RawOrigin; +use sp_runtime::traits::Bounded; + +use primitives::{ + HeadData, Id as ParaId, SessionIndex, ValidationCode, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, +}; + +// Constants for the benchmarking +const SESSION_INDEX: SessionIndex = 1; + +// Initialize a parathread for benchmarking. +pub fn init_parathread(para_id: ParaId) +where + T: Config + + crate::paras::Config + + crate::shared::Config + + crate::paras::Config + + crate::shared::Config, +{ + ParasShared::::set_session_index(SESSION_INDEX); + let mut config = HostConfiguration::default(); + config.on_demand_cores = 1; + ConfigurationPallet::::force_set_active_config(config); + let mut parachains = ParachainsCache::new(); + ParasPallet::::initialize_para_now( + &mut parachains, + para_id, + &ParaGenesisArgs { + para_kind: ParaKind::Parathread, + genesis_head: HeadData(vec![1, 2, 3, 4]), + validation_code: ValidationCode(vec![1, 2, 3, 4]), + }, + ); +} + +#[benchmarks] +mod benchmarks { + /// We want to fill the queue to the maximum, so exactly one more item fits. 
+ const MAX_FILL_BENCH: u32 = ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE.saturating_sub(1); + + use super::*; + #[benchmark] + fn place_order_keep_alive(s: Linear<1, MAX_FILL_BENCH>) { + // Setup + let caller = whitelisted_caller(); + let para_id = ParaId::from(111u32); + init_parathread::(para_id); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let assignment = Assignment::new(para_id); + + for _ in 0..s { + Pallet::::add_on_demand_assignment(assignment.clone(), QueuePushDirection::Back) + .unwrap(); + } + + #[extrinsic_call] + _(RawOrigin::Signed(caller.into()), BalanceOf::::max_value(), para_id) + } + + #[benchmark] + fn place_order_allow_death(s: Linear<1, MAX_FILL_BENCH>) { + // Setup + let caller = whitelisted_caller(); + let para_id = ParaId::from(111u32); + init_parathread::(para_id); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + let assignment = Assignment::new(para_id); + + for _ in 0..s { + Pallet::::add_on_demand_assignment(assignment.clone(), QueuePushDirection::Back) + .unwrap(); + } + + #[extrinsic_call] + _(RawOrigin::Signed(caller.into()), BalanceOf::::max_value(), para_id) + } + + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext( + crate::assigner_on_demand::mock_helpers::GenesisConfigBuilder::default().build() + ), + crate::mock::Test + ); +} diff --git a/runtime/parachains/src/assigner_on_demand/mock_helpers.rs b/runtime/parachains/src/assigner_on_demand/mock_helpers.rs new file mode 100644 index 000000000000..acfb24cbf194 --- /dev/null +++ b/runtime/parachains/src/assigner_on_demand/mock_helpers.rs @@ -0,0 +1,86 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Helper functions for tests, also used in runtime-benchmarks. + +#![cfg(test)] + +use super::*; + +use crate::{ + mock::MockGenesisConfig, + paras::{ParaGenesisArgs, ParaKind}, +}; + +use primitives::{Balance, HeadData, ValidationCode}; + +pub fn default_genesis_config() -> MockGenesisConfig { + MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { + config: crate::configuration::HostConfiguration { ..Default::default() }, + }, + ..Default::default() + } +} + +#[derive(Debug)] +pub struct GenesisConfigBuilder { + pub on_demand_cores: u32, + pub on_demand_base_fee: Balance, + pub on_demand_fee_variability: Perbill, + pub on_demand_max_queue_size: u32, + pub on_demand_target_queue_utilization: Perbill, + pub onboarded_on_demand_chains: Vec, +} + +impl Default for GenesisConfigBuilder { + fn default() -> Self { + Self { + on_demand_cores: 10, + on_demand_base_fee: 10_000, + on_demand_fee_variability: Perbill::from_percent(1), + on_demand_max_queue_size: 100, + on_demand_target_queue_utilization: Perbill::from_percent(25), + onboarded_on_demand_chains: vec![], + } + } +} + +impl GenesisConfigBuilder { + pub(super) fn build(self) -> MockGenesisConfig { + let mut genesis = default_genesis_config(); + let config = &mut genesis.configuration.config; + config.on_demand_cores = self.on_demand_cores; + config.on_demand_base_fee = self.on_demand_base_fee; + config.on_demand_fee_variability = self.on_demand_fee_variability; + config.on_demand_queue_max_size = self.on_demand_max_queue_size; + config.on_demand_target_queue_utilization = self.on_demand_target_queue_utilization; + + let paras = &mut 
genesis.paras.paras; + for para_id in self.onboarded_on_demand_chains { + paras.push(( + para_id, + ParaGenesisArgs { + genesis_head: HeadData::from(vec![0u8]), + validation_code: ValidationCode::from(vec![0u8]), + para_kind: ParaKind::Parathread, + }, + )) + } + + genesis + } +} diff --git a/runtime/parachains/src/assigner_on_demand/mod.rs b/runtime/parachains/src/assigner_on_demand/mod.rs new file mode 100644 index 000000000000..53ea1d19c4e3 --- /dev/null +++ b/runtime/parachains/src/assigner_on_demand/mod.rs @@ -0,0 +1,597 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The parachain on demand assignment module. +//! +//! Implements a mechanism for taking in orders for pay as you go (PAYG) or on demand +//! parachain (previously parathreads) assignments. This module is not handled by the +//! initializer but is instead instantiated in the `construct_runtime` macro. +//! +//! The module currently limits parallel execution of blocks from the same `ParaId` via +//! a core affinity mechanism. As long as there exists an affinity for a `CoreIndex` for +//! a specific `ParaId`, orders for blockspace for that `ParaId` will only be assigned to +//! that `CoreIndex`. This affinity mechanism can be removed if it can be shown that parallel +//! execution is valid. 
+ +mod benchmarking; +mod mock_helpers; + +#[cfg(test)] +mod tests; + +use crate::{configuration, paras, scheduler_common::AssignmentProvider}; + +use frame_support::{ + pallet_prelude::*, + traits::{ + Currency, + ExistenceRequirement::{self, AllowDeath, KeepAlive}, + WithdrawReasons, + }, +}; +use frame_system::pallet_prelude::*; +use primitives::{v5::Assignment, CoreIndex, Id as ParaId}; +use sp_runtime::{ + traits::{One, SaturatedConversion}, + FixedPointNumber, FixedPointOperand, FixedU128, Perbill, Saturating, +}; + +use sp_std::{collections::vec_deque::VecDeque, prelude::*}; + +const LOG_TARGET: &str = "runtime::parachains::on-demand-assigner"; + +pub use pallet::*; + +pub trait WeightInfo { + fn place_order(s: u32) -> Weight; +} + +/// A weight info that is only suitable for testing. +pub struct TestWeightInfo; + +impl WeightInfo for TestWeightInfo { + fn place_order(_: u32) -> Weight { + Weight::MAX + } +} + +/// Keeps track of how many assignments a scheduler currently has at a specific `CoreIndex` for a specific `ParaId`. +#[derive(Encode, Decode, Default, Clone, Copy, TypeInfo)] +#[cfg_attr(test, derive(PartialEq, Debug))] +pub struct CoreAffinityCount { + core_idx: CoreIndex, + count: u32, +} + +/// An indicator as to which end of the `OnDemandQueue` an assignment will be placed. +pub enum QueuePushDirection { + Back, + Front, +} + +/// Shorthand for the Balance type the runtime is using. +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + +/// Errors that can happen during spot traffic calculation. +#[derive(PartialEq)] +#[cfg_attr(feature = "std", derive(Debug))] +pub enum SpotTrafficCalculationErr { + /// The order queue capacity is at 0. + QueueCapacityIsZero, + /// The queue size is larger than the queue capacity. + QueueSizeLargerThanCapacity, + /// Arithmetic error during division, either division by 0 or over/underflow. 
+ Division, +} + +#[frame_support::pallet] +pub mod pallet { + + use super::*; + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + configuration::Config + paras::Config { + /// The runtime's definition of an event. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// The runtime's definition of a Currency. + type Currency: Currency; + + /// Something that provides the weight of this pallet. + type WeightInfo: WeightInfo; + + /// The default value for the spot traffic multiplier. + #[pallet::constant] + type TrafficDefaultValue: Get; + } + + /// Creates an empty spot traffic value if one isn't present in storage already. + #[pallet::type_value] + pub fn SpotTrafficOnEmpty() -> FixedU128 { + T::TrafficDefaultValue::get() + } + + /// Creates an empty on demand queue if one isn't present in storage already. + #[pallet::type_value] + pub fn OnDemandQueueOnEmpty() -> VecDeque { + VecDeque::new() + } + + /// Keeps track of the multiplier used to calculate the current spot price for the on demand assigner. + #[pallet::storage] + pub(super) type SpotTraffic = + StorageValue<_, FixedU128, ValueQuery, SpotTrafficOnEmpty>; + + /// The order storage entry. Uses a VecDeque to be able to push to the front of the + /// queue from the scheduler on session boundaries. + #[pallet::storage] + pub type OnDemandQueue = + StorageValue<_, VecDeque, ValueQuery, OnDemandQueueOnEmpty>; + + /// Maps a `ParaId` to `CoreIndex` and keeps track of how many assignments the scheduler has in it's + /// lookahead. Keeping track of this affinity prevents parallel execution of the same `ParaId` on two or more + /// `CoreIndex`es. + #[pallet::storage] + pub(super) type ParaIdAffinity = + StorageMap<_, Twox256, ParaId, CoreAffinityCount, OptionQuery>; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// An order was placed at some spot price amount. 
+ OnDemandOrderPlaced { para_id: ParaId, spot_price: BalanceOf }, + /// The value of the spot traffic multiplier changed. + SpotTrafficSet { traffic: FixedU128 }, + } + + #[pallet::error] + pub enum Error { + /// The `ParaId` supplied to the `place_order` call is not a valid `ParaThread`, making the call is invalid. + InvalidParaId, + /// The order queue is full, `place_order` will not continue. + QueueFull, + /// The current spot price is higher than the max amount specified in the `place_order` call, making it invalid. + SpotPriceHigherThanMaxAmount, + /// There are no on demand cores available. `place_order` will not add anything to the queue. + NoOnDemandCores, + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(_now: BlockNumberFor) -> Weight { + let config = >::config(); + // Calculate spot price multiplier and store it. + let old_traffic = SpotTraffic::::get(); + match Self::calculate_spot_traffic( + old_traffic, + config.on_demand_queue_max_size, + Self::queue_size(), + config.on_demand_target_queue_utilization, + config.on_demand_fee_variability, + ) { + Ok(new_traffic) => { + // Only update storage on change + if new_traffic != old_traffic { + SpotTraffic::::set(new_traffic); + Pallet::::deposit_event(Event::::SpotTrafficSet { + traffic: new_traffic, + }); + return T::DbWeight::get().reads_writes(2, 1) + } + }, + Err(SpotTrafficCalculationErr::QueueCapacityIsZero) => { + log::debug!( + target: LOG_TARGET, + "Error calculating spot traffic: The order queue capacity is at 0." + ); + }, + Err(SpotTrafficCalculationErr::QueueSizeLargerThanCapacity) => { + log::debug!( + target: LOG_TARGET, + "Error calculating spot traffic: The queue size is larger than the queue capacity." + ); + }, + Err(SpotTrafficCalculationErr::Division) => { + log::debug!( + target: LOG_TARGET, + "Error calculating spot traffic: Arithmetic error during division, either division by 0 or over/underflow." 
+ ); + }, + }; + T::DbWeight::get().reads_writes(2, 0) + } + } + + #[pallet::call] + impl Pallet { + /// Create a single on demand core order. + /// Will use the spot price for the current block and will reap the account if needed. + /// + /// Parameters: + /// - `origin`: The sender of the call, funds will be withdrawn from this account. + /// - `max_amount`: The maximum balance to withdraw from the origin to place an order. + /// - `para_id`: A `ParaId` the origin wants to provide blockspace for. + /// + /// Errors: + /// - `InsufficientBalance`: from the Currency implementation + /// - `InvalidParaId` + /// - `QueueFull` + /// - `SpotPriceHigherThanMaxAmount` + /// - `NoOnDemandCores` + /// + /// Events: + /// - `SpotOrderPlaced` + #[pallet::call_index(0)] + #[pallet::weight(::WeightInfo::place_order(OnDemandQueue::::get().len() as u32))] + pub fn place_order_allow_death( + origin: OriginFor, + max_amount: BalanceOf, + para_id: ParaId, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + Pallet::::do_place_order(sender, max_amount, para_id, AllowDeath) + } + + /// Same as the [`place_order_allow_death`] call , but with a check that placing the order + /// will not reap the account. + /// + /// Parameters: + /// - `origin`: The sender of the call, funds will be withdrawn from this account. + /// - `max_amount`: The maximum balance to withdraw from the origin to place an order. + /// - `para_id`: A `ParaId` the origin wants to provide blockspace for. 
+ /// + /// Errors: + /// - `InsufficientBalance`: from the Currency implementation + /// - `InvalidParaId` + /// - `QueueFull` + /// - `SpotPriceHigherThanMaxAmount` + /// - `NoOnDemandCores` + /// + /// Events: + /// - `SpotOrderPlaced` + #[pallet::call_index(1)] + #[pallet::weight(::WeightInfo::place_order(OnDemandQueue::::get().len() as u32))] + pub fn place_order_keep_alive( + origin: OriginFor, + max_amount: BalanceOf, + para_id: ParaId, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + Pallet::::do_place_order(sender, max_amount, para_id, KeepAlive) + } + } +} + +impl Pallet +where + BalanceOf: FixedPointOperand, +{ + /// Helper function for `place_order_*` calls. Used to differentiate between placing orders + /// with a keep alive check or to allow the account to be reaped. + /// + /// Parameters: + /// - `sender`: The sender of the call, funds will be withdrawn from this account. + /// - `max_amount`: The maximum balance to withdraw from the origin to place an order. + /// - `para_id`: A `ParaId` the origin wants to provide blockspace for. + /// - `existence_requirement`: Whether or not to ensure that the account will not be reaped. 
+ /// + /// Errors: + /// - `InsufficientBalance`: from the Currency implementation + /// - `InvalidParaId` + /// - `QueueFull` + /// - `SpotPriceHigherThanMaxAmount` + /// - `NoOnDemandCores` + /// + /// Events: + /// - `SpotOrderPlaced` + fn do_place_order( + sender: ::AccountId, + max_amount: BalanceOf, + para_id: ParaId, + existence_requirement: ExistenceRequirement, + ) -> DispatchResult { + let config = >::config(); + + // Are there any schedulable cores in this session + ensure!(config.on_demand_cores > 0, Error::::NoOnDemandCores); + + // Traffic always falls back to 1.0 + let traffic = SpotTraffic::::get(); + + // Calculate spot price + let spot_price: BalanceOf = + traffic.saturating_mul_int(config.on_demand_base_fee.saturated_into::>()); + + // Is the current price higher than `max_amount` + ensure!(spot_price.le(&max_amount), Error::::SpotPriceHigherThanMaxAmount); + + // Charge the sending account the spot price + T::Currency::withdraw(&sender, spot_price, WithdrawReasons::FEE, existence_requirement)?; + + let assignment = Assignment::new(para_id); + + let res = Pallet::::add_on_demand_assignment(assignment, QueuePushDirection::Back); + + match res { + Ok(_) => { + Pallet::::deposit_event(Event::::OnDemandOrderPlaced { para_id, spot_price }); + return Ok(()) + }, + Err(err) => return Err(err), + } + } + + /// The spot price multiplier. This is based on the transaction fee calculations defined in: + /// https://research.web3.foundation/Polkadot/overview/token-economics#setting-transaction-fees + /// + /// Parameters: + /// - `traffic`: The previously calculated multiplier, can never go below 1.0. + /// - `queue_capacity`: The max size of the order book. + /// - `queue_size`: How many orders are currently in the order book. + /// - `target_queue_utilisation`: How much of the queue_capacity should be ideally occupied, expressed in percentages(perbill). + /// - `variability`: A variability factor, i.e. how quickly the spot price adjusts. 
This number can be chosen by + /// p/(k*(1-s)) where p is the desired ratio increase in spot price over k number of blocks. + /// s is the target_queue_utilisation. A concrete example: v = 0.05/(20*(1-0.25)) = 0.0033. + /// + /// Returns: + /// - A `FixedU128` in the range of `Config::TrafficDefaultValue` - `FixedU128::MAX` on success. + /// + /// Errors: + /// - `SpotTrafficCalculationErr::QueueCapacityIsZero` + /// - `SpotTrafficCalculationErr::QueueSizeLargerThanCapacity` + /// - `SpotTrafficCalculationErr::Division` + pub(crate) fn calculate_spot_traffic( + traffic: FixedU128, + queue_capacity: u32, + queue_size: u32, + target_queue_utilisation: Perbill, + variability: Perbill, + ) -> Result { + // Return early if queue has no capacity. + if queue_capacity == 0 { + return Err(SpotTrafficCalculationErr::QueueCapacityIsZero) + } + + // Return early if queue size is greater than capacity. + if queue_size > queue_capacity { + return Err(SpotTrafficCalculationErr::QueueSizeLargerThanCapacity) + } + + // (queue_size / queue_capacity) - target_queue_utilisation + let queue_util_ratio = FixedU128::from_rational(queue_size.into(), queue_capacity.into()); + let positive = queue_util_ratio >= target_queue_utilisation.into(); + let queue_util_diff = queue_util_ratio.max(target_queue_utilisation.into()) - + queue_util_ratio.min(target_queue_utilisation.into()); + + // variability * queue_util_diff + let var_times_qud = queue_util_diff.saturating_mul(variability.into()); + + // variability^2 * queue_util_diff^2 + let var_times_qud_pow = var_times_qud.saturating_mul(var_times_qud); + + // (variability^2 * queue_util_diff^2)/2 + let div_by_two: FixedU128; + match var_times_qud_pow.const_checked_div(2.into()) { + Some(dbt) => div_by_two = dbt, + None => return Err(SpotTrafficCalculationErr::Division), + } + + // traffic * (1 + queue_util_diff) + div_by_two + if positive { + let new_traffic = queue_util_diff + .saturating_add(div_by_two) + .saturating_add(One::one()) + 
.saturating_mul(traffic); + Ok(new_traffic.max(::TrafficDefaultValue::get())) + } else { + let new_traffic = queue_util_diff.saturating_sub(div_by_two).saturating_mul(traffic); + Ok(new_traffic.max(::TrafficDefaultValue::get())) + } + } + + /// Adds an assignment to the on demand queue. + /// + /// Paramenters: + /// - `assignment`: The on demand assignment to add to the queue. + /// - `location`: Whether to push this entry to the back or the front of the queue. + /// Pushing an entry to the front of the queue is only used when the scheduler + /// wants to push back an entry it has already popped. + /// Returns: + /// - The unit type on success. + /// + /// Errors: + /// - `InvalidParaId` + /// - `QueueFull` + pub fn add_on_demand_assignment( + assignment: Assignment, + location: QueuePushDirection, + ) -> Result<(), DispatchError> { + // Only parathreads are valid paraids for on the go parachains. + ensure!(>::is_parathread(assignment.para_id), Error::::InvalidParaId); + + let config = >::config(); + + OnDemandQueue::::try_mutate(|queue| { + // Abort transaction if queue is too large + ensure!(Self::queue_size() < config.on_demand_queue_max_size, Error::::QueueFull); + match location { + QueuePushDirection::Back => queue.push_back(assignment), + QueuePushDirection::Front => queue.push_front(assignment), + }; + Ok(()) + }) + } + + /// Get the size of the on demand queue. + /// + /// Returns: + /// - The size of the on demand queue. + fn queue_size() -> u32 { + let config = >::config(); + match OnDemandQueue::::get().len().try_into() { + Ok(size) => return size, + Err(_) => { + log::debug!( + target: LOG_TARGET, + "Failed to fetch the on demand queue size, returning the max size." + ); + return config.on_demand_queue_max_size + }, + } + } + + /// Getter for the order queue. + pub fn get_queue() -> VecDeque { + OnDemandQueue::::get() + } + + /// Getter for the affinity tracker. 
+ pub fn get_affinity_map(para_id: ParaId) -> Option { + ParaIdAffinity::::get(para_id) + } + + /// Decreases the affinity of a `ParaId` to a specified `CoreIndex`. + /// Subtracts from the count of the `CoreAffinityCount` if an entry is found and the core_idx matches. + /// When the count reaches 0, the entry is removed. + /// A non-existant entry is a no-op. + fn decrease_affinity(para_id: ParaId, core_idx: CoreIndex) { + ParaIdAffinity::::mutate(para_id, |maybe_affinity| { + if let Some(affinity) = maybe_affinity { + if affinity.core_idx == core_idx { + let new_count = affinity.count.saturating_sub(1); + if new_count > 0 { + *maybe_affinity = Some(CoreAffinityCount { core_idx, count: new_count }); + } else { + *maybe_affinity = None; + } + } + } + }); + } + + /// Increases the affinity of a `ParaId` to a specified `CoreIndex`. + /// Adds to the count of the `CoreAffinityCount` if an entry is found and the core_idx matches. + /// A non-existant entry will be initialized with a count of 1 and uses the supplied `CoreIndex`. + fn increase_affinity(para_id: ParaId, core_idx: CoreIndex) { + ParaIdAffinity::::mutate(para_id, |maybe_affinity| match maybe_affinity { + Some(affinity) => + if affinity.core_idx == core_idx { + *maybe_affinity = Some(CoreAffinityCount { + core_idx, + count: affinity.count.saturating_add(1), + }); + }, + None => { + *maybe_affinity = Some(CoreAffinityCount { core_idx, count: 1 }); + }, + }) + } +} + +impl AssignmentProvider> for Pallet { + fn session_core_count() -> u32 { + let config = >::config(); + config.on_demand_cores + } + + /// Take the next queued entry that is available for a given core index. + /// Invalidates and removes orders with a `para_id` that is not `ParaLifecycle::Parathread` + /// but only in [0..P] range slice of the order queue, where P is the element that is + /// removed from the order queue. 
+ /// + /// Parameters: + /// - `core_idx`: The core index + /// - `previous_paraid`: Which paraid was previously processed on the requested core. + /// Is None if nothing was processed on the core. + fn pop_assignment_for_core( + core_idx: CoreIndex, + previous_para: Option, + ) -> Option { + // Only decrease the affinity of the previous para if it exists. + // A nonexistant `ParaId` indicates that the scheduler has not processed any + // `ParaId` this session. + if let Some(previous_para_id) = previous_para { + Pallet::::decrease_affinity(previous_para_id, core_idx) + } + + let mut queue: VecDeque = OnDemandQueue::::get(); + + let mut invalidated_para_id_indexes: Vec = vec![]; + + // Get the position of the next `ParaId`. Select either a valid `ParaId` that has an affinity + // to the same `CoreIndex` as the scheduler asks for or a valid `ParaId` with no affinity at all. + let pos = queue.iter().enumerate().position(|(index, assignment)| { + if >::is_parathread(assignment.para_id) { + match ParaIdAffinity::::get(&assignment.para_id) { + Some(affinity) => return affinity.core_idx == core_idx, + None => return true, + } + } + // Record no longer valid para_ids. + invalidated_para_id_indexes.push(index); + return false + }); + + // Collect the popped value. + let popped = pos.and_then(|p: usize| { + if let Some(assignment) = queue.remove(p) { + Pallet::::increase_affinity(assignment.para_id, core_idx); + return Some(assignment) + }; + None + }); + + // Only remove the invalid indexes *after* using the index. + // Removed in reverse order so that the indexes don't shift. + invalidated_para_id_indexes.iter().rev().for_each(|idx| { + queue.remove(*idx); + }); + + // Write changes to storage. + OnDemandQueue::::set(queue); + + popped + } + + /// Push an assignment back to the queue. + /// Typically used on session boundaries. + /// Parameters: + /// - `core_idx`: The core index + /// - `assignment`: The on demand assignment. 
+ fn push_assignment_for_core(core_idx: CoreIndex, assignment: Assignment) { + Pallet::::decrease_affinity(assignment.para_id, core_idx); + // Skip the queue on push backs from scheduler + match Pallet::::add_on_demand_assignment(assignment, QueuePushDirection::Front) { + Ok(_) => {}, + Err(_) => {}, + } + } + + fn get_availability_period(_core_index: CoreIndex) -> BlockNumberFor { + let config = >::config(); + config.paras_availability_period + } + + fn get_max_retries(_core_idx: CoreIndex) -> u32 { + let config = >::config(); + config.on_demand_retries + } +} diff --git a/runtime/parachains/src/assigner_on_demand/tests.rs b/runtime/parachains/src/assigner_on_demand/tests.rs new file mode 100644 index 000000000000..542f78a37b7b --- /dev/null +++ b/runtime/parachains/src/assigner_on_demand/tests.rs @@ -0,0 +1,557 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use super::*; + +use crate::{ + assigner_on_demand::{mock_helpers::GenesisConfigBuilder, Error}, + initializer::SessionChangeNotification, + mock::{ + new_test_ext, Balances, OnDemandAssigner, Paras, ParasShared, RuntimeOrigin, Scheduler, + System, Test, + }, + paras::{ParaGenesisArgs, ParaKind}, +}; +use frame_support::{assert_noop, assert_ok, error::BadOrigin}; +use pallet_balances::Error as BalancesError; +use primitives::{ + v5::{Assignment, ValidationCode}, + BlockNumber, SessionIndex, +}; +use sp_std::collections::btree_map::BTreeMap; + +fn schedule_blank_para(id: ParaId, parakind: ParaKind) { + let validation_code: ValidationCode = vec![1, 2, 3].into(); + assert_ok!(Paras::schedule_para_initialize( + id, + ParaGenesisArgs { + genesis_head: Vec::new().into(), + validation_code: validation_code.clone(), + para_kind: parakind, + } + )); + + assert_ok!(Paras::add_trusted_validation_code(RuntimeOrigin::root(), validation_code)); +} + +fn run_to_block( + to: BlockNumber, + new_session: impl Fn(BlockNumber) -> Option>, +) { + while System::block_number() < to { + let b = System::block_number(); + + Scheduler::initializer_finalize(); + Paras::initializer_finalize(b); + + if let Some(notification) = new_session(b + 1) { + let mut notification_with_session_index = notification; + // We will make every session change trigger an action queue. Normally this may require 2 or more session changes. 
+ if notification_with_session_index.session_index == SessionIndex::default() { + notification_with_session_index.session_index = ParasShared::scheduled_session(); + } + Paras::initializer_on_new_session(¬ification_with_session_index); + Scheduler::initializer_on_new_session(¬ification_with_session_index); + } + + System::on_finalize(b); + + System::on_initialize(b + 1); + System::set_block_number(b + 1); + + Paras::initializer_initialize(b + 1); + Scheduler::initializer_initialize(b + 1); + + // In the real runtime this is expected to be called by the `InclusionInherent` pallet. + Scheduler::update_claimqueue(BTreeMap::new(), b + 1); + } +} + +#[test] +fn spot_traffic_capacity_zero_returns_none() { + match OnDemandAssigner::calculate_spot_traffic( + FixedU128::from(u128::MAX), + 0u32, + u32::MAX, + Perbill::from_percent(100), + Perbill::from_percent(1), + ) { + Ok(_) => panic!("Error"), + Err(e) => assert_eq!(e, SpotTrafficCalculationErr::QueueCapacityIsZero), + }; +} + +#[test] +fn spot_traffic_queue_size_larger_than_capacity_returns_none() { + match OnDemandAssigner::calculate_spot_traffic( + FixedU128::from(u128::MAX), + 1u32, + 2u32, + Perbill::from_percent(100), + Perbill::from_percent(1), + ) { + Ok(_) => panic!("Error"), + Err(e) => assert_eq!(e, SpotTrafficCalculationErr::QueueSizeLargerThanCapacity), + } +} + +#[test] +fn spot_traffic_calculation_identity() { + match OnDemandAssigner::calculate_spot_traffic( + FixedU128::from_u32(1), + 1000, + 100, + Perbill::from_percent(10), + Perbill::from_percent(3), + ) { + Ok(res) => { + assert_eq!(res, FixedU128::from_u32(1)) + }, + _ => (), + } +} + +#[test] +fn spot_traffic_calculation_u32_max() { + match OnDemandAssigner::calculate_spot_traffic( + FixedU128::from_u32(1), + u32::MAX, + u32::MAX, + Perbill::from_percent(100), + Perbill::from_percent(3), + ) { + Ok(res) => { + assert_eq!(res, FixedU128::from_u32(1)) + }, + _ => panic!("Error"), + }; +} + +#[test] +fn spot_traffic_calculation_u32_traffic_max() { + 
match OnDemandAssigner::calculate_spot_traffic( + FixedU128::from(u128::MAX), + u32::MAX, + u32::MAX, + Perbill::from_percent(1), + Perbill::from_percent(1), + ) { + Ok(res) => assert_eq!(res, FixedU128::from(u128::MAX)), + _ => panic!("Error"), + }; +} + +#[test] +fn sustained_target_increases_spot_traffic() { + let mut traffic = FixedU128::from_u32(1u32); + for _ in 0..50 { + traffic = OnDemandAssigner::calculate_spot_traffic( + traffic, + 100, + 12, + Perbill::from_percent(10), + Perbill::from_percent(100), + ) + .unwrap() + } + assert_eq!(traffic, FixedU128::from_inner(2_718_103_312_071_174_015u128)) +} + +#[test] +fn spot_traffic_can_decrease() { + let traffic = FixedU128::from_u32(100u32); + match OnDemandAssigner::calculate_spot_traffic( + traffic, + 100u32, + 0u32, + Perbill::from_percent(100), + Perbill::from_percent(100), + ) { + Ok(new_traffic) => + assert_eq!(new_traffic, FixedU128::from_inner(50_000_000_000_000_000_000u128)), + _ => panic!("Error"), + } +} + +#[test] +fn spot_traffic_decreases_over_time() { + let mut traffic = FixedU128::from_u32(100u32); + for _ in 0..5 { + traffic = OnDemandAssigner::calculate_spot_traffic( + traffic, + 100u32, + 0u32, + Perbill::from_percent(100), + Perbill::from_percent(100), + ) + .unwrap(); + println!("{traffic}"); + } + assert_eq!(traffic, FixedU128::from_inner(3_125_000_000_000_000_000u128)) +} + +#[test] +fn place_order_works() { + let alice = 1u64; + let amt = 10_000_000u128; + let para_id = ParaId::from(111); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + // Initialize the parathread and wait for it to be ready. 
+ schedule_blank_para(para_id, ParaKind::Parathread); + + assert!(!Paras::is_parathread(para_id)); + + run_to_block(100, |n| if n == 100 { Some(Default::default()) } else { None }); + + assert!(Paras::is_parathread(para_id)); + + // Does not work unsigned + assert_noop!( + OnDemandAssigner::place_order_allow_death(RuntimeOrigin::none(), amt, para_id), + BadOrigin + ); + + // Does not work with max_amount lower than fee + let low_max_amt = 1u128; + assert_noop!( + OnDemandAssigner::place_order_allow_death( + RuntimeOrigin::signed(alice), + low_max_amt, + para_id, + ), + Error::::SpotPriceHigherThanMaxAmount, + ); + + // Does not work with insufficient balance + assert_noop!( + OnDemandAssigner::place_order_allow_death(RuntimeOrigin::signed(alice), amt, para_id), + BalancesError::::InsufficientBalance + ); + + // Works + Balances::make_free_balance_be(&alice, amt); + run_to_block(101, |n| if n == 101 { Some(Default::default()) } else { None }); + assert_ok!(OnDemandAssigner::place_order_allow_death( + RuntimeOrigin::signed(alice), + amt, + para_id + )); + }); +} + +#[test] +fn place_order_keep_alive_keeps_alive() { + let alice = 1u64; + let amt = 1u128; // The same as crate::mock's EXISTENTIAL_DEPOSIT + let max_amt = 10_000_000u128; + let para_id = ParaId::from(111); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + // Initialize the parathread and wait for it to be ready. 
+ schedule_blank_para(para_id, ParaKind::Parathread); + Balances::make_free_balance_be(&alice, amt); + + assert!(!Paras::is_parathread(para_id)); + run_to_block(100, |n| if n == 100 { Some(Default::default()) } else { None }); + assert!(Paras::is_parathread(para_id)); + + assert_noop!( + OnDemandAssigner::place_order_keep_alive( + RuntimeOrigin::signed(alice), + max_amt, + para_id + ), + BalancesError::::InsufficientBalance + ); + }); +} + +#[test] +fn add_on_demand_assignment_works() { + let para_a = ParaId::from(111); + let assignment = Assignment::new(para_a); + + let mut genesis = GenesisConfigBuilder::default(); + genesis.on_demand_max_queue_size = 1; + new_test_ext(genesis.build()).execute_with(|| { + // Initialize the parathread and wait for it to be ready. + schedule_blank_para(para_a, ParaKind::Parathread); + + // `para_a` is not onboarded as a parathread yet. + assert_noop!( + OnDemandAssigner::add_on_demand_assignment( + assignment.clone(), + QueuePushDirection::Back + ), + Error::::InvalidParaId + ); + + assert!(!Paras::is_parathread(para_a)); + run_to_block(100, |n| if n == 100 { Some(Default::default()) } else { None }); + assert!(Paras::is_parathread(para_a)); + + // `para_a` is now onboarded as a valid parathread. + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment.clone(), + QueuePushDirection::Back + )); + + // Max queue size is 1, queue should be full. 
+ assert_noop!( + OnDemandAssigner::add_on_demand_assignment(assignment, QueuePushDirection::Back), + Error::::QueueFull + ); + }); +} + +#[test] +fn spotqueue_push_directions() { + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + let para_a = ParaId::from(111); + let para_b = ParaId::from(222); + let para_c = ParaId::from(333); + + schedule_blank_para(para_a, ParaKind::Parathread); + schedule_blank_para(para_b, ParaKind::Parathread); + schedule_blank_para(para_c, ParaKind::Parathread); + + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + + let assignment_a = Assignment { para_id: para_a }; + let assignment_b = Assignment { para_id: para_b }; + let assignment_c = Assignment { para_id: para_c }; + + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_a.clone(), + QueuePushDirection::Front + )); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_b.clone(), + QueuePushDirection::Front + )); + + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_c.clone(), + QueuePushDirection::Back + )); + + assert_eq!(OnDemandAssigner::queue_size(), 3); + assert_eq!( + OnDemandAssigner::get_queue(), + VecDeque::from(vec![assignment_b, assignment_a, assignment_c]) + ) + }); +} + +#[test] +fn affinity_changes_work() { + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + let para_a = ParaId::from(111); + schedule_blank_para(para_a, ParaKind::Parathread); + + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + + let assignment_a = Assignment { para_id: para_a }; + // There should be no affinity before starting. + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); + + // Add enough assignments to the order queue. 
+ for _ in 0..10 { + OnDemandAssigner::add_on_demand_assignment( + assignment_a.clone(), + QueuePushDirection::Front, + ) + .expect("Invalid paraid or queue full"); + } + + // There should be no affinity before the scheduler pops. + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); + + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), None); + + // Affinity count is 1 after popping. + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 1); + + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_a.clone())); + + // Affinity count is 1 after popping with a previous para. + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 1); + assert_eq!(OnDemandAssigner::queue_size(), 8); + + for _ in 0..3 { + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), None); + } + + // Affinity count is 4 after popping 3 times without a previous para. + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 4); + assert_eq!(OnDemandAssigner::queue_size(), 5); + + for _ in 0..5 { + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_a.clone())); + } + + // Affinity count should still be 4 but queue should be empty. + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 4); + assert_eq!(OnDemandAssigner::queue_size(), 0); + + // Pop 4 times and get to exactly 0 (None) affinity. + for _ in 0..4 { + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_a.clone())); + } + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); + + // Decreasing affinity beyond 0 should still be None. 
+ OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_a.clone())); + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); + }); +} + +#[test] +fn affinity_prohibits_parallel_scheduling() { + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + let para_a = ParaId::from(111); + let para_b = ParaId::from(222); + + schedule_blank_para(para_a, ParaKind::Parathread); + schedule_blank_para(para_b, ParaKind::Parathread); + + run_to_block(11, |n| if n == 11 { Some(Default::default()) } else { None }); + + let assignment_a = Assignment { para_id: para_a }; + let assignment_b = Assignment { para_id: para_b }; + + // There should be no affinity before starting. + assert!(OnDemandAssigner::get_affinity_map(para_a).is_none()); + assert!(OnDemandAssigner::get_affinity_map(para_b).is_none()); + + // Add 2 assignments for para_a for every para_b. + OnDemandAssigner::add_on_demand_assignment(assignment_a.clone(), QueuePushDirection::Back) + .expect("Invalid paraid or queue full"); + + OnDemandAssigner::add_on_demand_assignment(assignment_a.clone(), QueuePushDirection::Back) + .expect("Invalid paraid or queue full"); + + OnDemandAssigner::add_on_demand_assignment(assignment_b.clone(), QueuePushDirection::Back) + .expect("Invalid paraid or queue full"); + + assert_eq!(OnDemandAssigner::queue_size(), 3); + + // Approximate having 1 core. + for _ in 0..3 { + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), None); + } + + // Affinity on one core is meaningless. 
+ assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 2); + assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().count, 1); + assert_eq!( + OnDemandAssigner::get_affinity_map(para_a).unwrap().core_idx, + OnDemandAssigner::get_affinity_map(para_b).unwrap().core_idx + ); + + // Clear affinity + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_a.clone())); + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_a.clone())); + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_b.clone())); + + // Add 2 assignments for para_a for every para_b. + OnDemandAssigner::add_on_demand_assignment(assignment_a.clone(), QueuePushDirection::Back) + .expect("Invalid paraid or queue full"); + + OnDemandAssigner::add_on_demand_assignment(assignment_a.clone(), QueuePushDirection::Back) + .expect("Invalid paraid or queue full"); + + OnDemandAssigner::add_on_demand_assignment(assignment_b.clone(), QueuePushDirection::Back) + .expect("Invalid paraid or queue full"); + + // Approximate having 2 cores. + for _ in 0..3 { + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), None); + OnDemandAssigner::pop_assignment_for_core(CoreIndex(1), None); + } + + // Affinity should be the same as before, but on different cores. 
+ assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().count, 2); + assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().count, 1); + assert_eq!(OnDemandAssigner::get_affinity_map(para_a).unwrap().core_idx, CoreIndex(0)); + assert_eq!(OnDemandAssigner::get_affinity_map(para_b).unwrap().core_idx, CoreIndex(1)); + }); +} + +#[test] +fn cannot_place_order_when_no_on_demand_cores() { + let mut genesis = GenesisConfigBuilder::default(); + genesis.on_demand_cores = 0; + let para_id = ParaId::from(10); + let alice = 1u64; + let amt = 10_000_000u128; + + new_test_ext(genesis.build()).execute_with(|| { + schedule_blank_para(para_id, ParaKind::Parathread); + Balances::make_free_balance_be(&alice, amt); + + assert!(!Paras::is_parathread(para_id)); + + run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + + assert!(Paras::is_parathread(para_id)); + + assert_noop!( + OnDemandAssigner::place_order_allow_death(RuntimeOrigin::signed(alice), amt, para_id), + Error::::NoOnDemandCores + ); + }); +} + +#[test] +fn on_demand_orders_cannot_be_popped_if_lifecycle_changes() { + let para_id = ParaId::from(10); + let assignment = Assignment { para_id }; + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + // Register the para_id as a parathread + schedule_blank_para(para_id, ParaKind::Parathread); + + assert!(!Paras::is_parathread(para_id)); + run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + assert!(Paras::is_parathread(para_id)); + + // Add two assignments for a para_id with a valid lifecycle. 
+ assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment.clone(), + QueuePushDirection::Back + )); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment.clone(), + QueuePushDirection::Back + )); + + // First pop is fine + assert!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), None) == Some(assignment)); + + // Deregister para + assert_ok!(Paras::schedule_para_cleanup(para_id)); + + // Run to new session and verify that para_id is no longer a valid parathread. + assert!(Paras::is_parathread(para_id)); + run_to_block(20, |n| if n == 20 { Some(Default::default()) } else { None }); + assert!(!Paras::is_parathread(para_id)); + + // Second pop should be None. + assert!(OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(para_id)) == None); + }); +} diff --git a/runtime/parachains/src/scheduler_parachains/mod.rs b/runtime/parachains/src/assigner_parachains.rs similarity index 61% rename from runtime/parachains/src/scheduler_parachains/mod.rs rename to runtime/parachains/src/assigner_parachains.rs index 223afa77ae14..06f445394caf 100644 --- a/runtime/parachains/src/scheduler_parachains/mod.rs +++ b/runtime/parachains/src/assigner_parachains.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. +// Copyright (C) Parity Technologies (UK) Ltd. // This file is part of Polkadot. // Polkadot is free software: you can redistribute it and/or modify @@ -14,15 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use frame_system::pallet_prelude::BlockNumberFor; - -use primitives::{CoreIndex, Id as ParaId}; - -use crate::{configuration, paras, scheduler_common::Assignment}; +//! The bulk (parachain slot auction) blockspace assignment provider. +//! This provider is tightly coupled with the configuration and paras modules. 
+use crate::{configuration, paras, scheduler_common::AssignmentProvider}; +use frame_system::pallet_prelude::BlockNumberFor; pub use pallet::*; - -use crate::scheduler_common::AssignmentProvider; +use primitives::{v5::Assignment, CoreIndex, Id as ParaId}; #[frame_support::pallet] pub mod pallet { @@ -33,22 +31,14 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: - frame_system::Config - + configuration::Config - + paras::Config - + crate::scheduler::pallet::Config - { - } + pub trait Config: frame_system::Config + configuration::Config + paras::Config {} } -impl AssignmentProvider for Pallet { +impl AssignmentProvider> for Pallet { fn session_core_count() -> u32 { >::parachains().len() as u32 } - fn new_session() {} - fn pop_assignment_for_core( core_idx: CoreIndex, _concluded_para: Option, @@ -56,15 +46,19 @@ impl AssignmentProvider for Pallet { >::parachains() .get(core_idx.0 as usize) .copied() - .map(Assignment::Parachain) + .map(|para_id| Assignment::new(para_id)) } + /// Bulk assignment has no need to push the assignment back on a session change, + /// this is a no-op in the case of a bulk assignment slot. fn push_assignment_for_core(_: CoreIndex, _: Assignment) {} fn get_availability_period(_: CoreIndex) -> BlockNumberFor { - >::config().chain_availability_period + >::config().paras_availability_period } + /// There are retries set up in bulk assignment as the next slot already goes to + /// same [`ParaId`]. 
fn get_max_retries(_: CoreIndex) -> u32 { 0 } diff --git a/runtime/parachains/src/builder.rs b/runtime/parachains/src/builder.rs index 7a32a44ba6ce..0f9ba4f34efd 100644 --- a/runtime/parachains/src/builder.rs +++ b/runtime/parachains/src/builder.rs @@ -24,13 +24,14 @@ use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; use primitives::{ - collator_signature_payload, AvailabilityBitfield, BackedCandidate, CandidateCommitments, - CandidateDescriptor, CandidateHash, CollatorId, CollatorSignature, CommittedCandidateReceipt, - CompactStatement, CoreIndex, CoreOccupied, DisputeStatement, DisputeStatementSet, GroupIndex, - HeadData, Id as ParaId, IndexedVec, InherentData as ParachainsInherentData, - InvalidDisputeStatementKind, PersistedValidationData, SessionIndex, SigningContext, - UncheckedSigned, ValidDisputeStatementKind, ValidationCode, ValidatorId, ValidatorIndex, - ValidityAttestation, + collator_signature_payload, + v5::{Assignment, ParasEntry}, + AvailabilityBitfield, BackedCandidate, CandidateCommitments, CandidateDescriptor, + CandidateHash, CollatorId, CollatorSignature, CommittedCandidateReceipt, CompactStatement, + CoreIndex, CoreOccupied, DisputeStatement, DisputeStatementSet, GroupIndex, HeadData, + Id as ParaId, IndexedVec, InherentData as ParachainsInherentData, InvalidDisputeStatementKind, + PersistedValidationData, SessionIndex, SigningContext, UncheckedSigned, + ValidDisputeStatementKind, ValidationCode, ValidatorId, ValidatorIndex, ValidityAttestation, }; use sp_core::{sr25519, H256}; use sp_runtime::{ @@ -687,12 +688,19 @@ impl BenchBuilder { ); assert_eq!(inclusion::PendingAvailability::::iter().count(), used_cores as usize,); + let config = >::config(); + let now = >::block_number() + One::one(); // Mark all the used cores as occupied. 
We expect that their are `backed_and_concluding_cores` // that are pending availability and that there are `used_cores - backed_and_concluding_cores ` // which are about to be disputed. let cores = (0..used_cores) .into_iter() - .map(|i| CoreOccupied::Parachain(ParaId::from(i as u32))) + .map(|i| { + CoreOccupied::Paras(ParasEntry::new( + Assignment::new(ParaId::from(i as u32)), + now + config.on_demand_ttl, + )) + }) .collect(); scheduler::AvailabilityCores::::set(cores); diff --git a/runtime/parachains/src/configuration.rs b/runtime/parachains/src/configuration.rs index 38a24211fb67..6e2cd8a8a652 100644 --- a/runtime/parachains/src/configuration.rs +++ b/runtime/parachains/src/configuration.rs @@ -25,9 +25,9 @@ use parity_scale_codec::{Decode, Encode}; use polkadot_parachain::primitives::{MAX_HORIZONTAL_MESSAGE_NUM, MAX_UPWARD_MESSAGE_NUM}; use primitives::{ vstaging::AsyncBackingParams, Balance, ExecutorParams, SessionIndex, MAX_CODE_SIZE, - MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, + MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; -use sp_runtime::traits::Zero; +use sp_runtime::{traits::Zero, Perbill}; use sp_std::prelude::*; #[cfg(test)] @@ -42,7 +42,7 @@ pub use pallet::*; const LOG_TARGET: &str = "runtime::configuration"; -/// All configuration of the runtime with respect to parachains and parathreads. +/// All configuration of the runtime with respect to paras. #[derive( Clone, Encode, @@ -113,7 +113,7 @@ pub struct HostConfiguration { /// been completed. /// /// Note, there are situations in which `expected_at` in the past. For example, if - /// [`chain_availability_period`] or [`thread_availability_period`] is less than the delay set by + /// [`paras_availability_period`] is less than the delay set by /// this field or if PVF pre-check took more time than the delay. In such cases, the upgrade is /// further at the earliest possible time determined by [`minimum_validation_upgrade_delay`]. 
/// @@ -142,8 +142,6 @@ pub struct HostConfiguration { pub max_downward_message_size: u32, /// The maximum number of outbound HRMP channels a parachain is allowed to open. pub hrmp_max_parachain_outbound_channels: u32, - /// The maximum number of outbound HRMP channels a parathread is allowed to open. - pub hrmp_max_parathread_outbound_channels: u32, /// The deposit that the sender should provide for opening an HRMP channel. pub hrmp_sender_deposit: Balance, /// The deposit that the recipient should provide for accepting opening an HRMP channel. @@ -154,8 +152,6 @@ pub struct HostConfiguration { pub hrmp_channel_max_total_size: u32, /// The maximum number of inbound HRMP channels a parachain is allowed to accept. pub hrmp_max_parachain_inbound_channels: u32, - /// The maximum number of inbound HRMP channels a parathread is allowed to accept. - pub hrmp_max_parathread_inbound_channels: u32, /// The maximum size of a message that could ever be put into an HRMP channel. /// /// This parameter affects the upper bound of size of `CandidateCommitments`. @@ -170,26 +166,33 @@ pub struct HostConfiguration { /// How long to keep code on-chain, in blocks. This should be sufficiently long that disputes /// have concluded. pub code_retention_period: BlockNumber, - /// The amount of execution cores to dedicate to parathread execution. - pub parathread_cores: u32, - /// The number of retries that a parathread author has to submit their block. - pub parathread_retries: u32, + /// The amount of execution cores to dedicate to on demand execution. + pub on_demand_cores: u32, + /// The number of retries that a on demand author has to submit their block. + pub on_demand_retries: u32, + /// The maximum queue size of the pay as you go module. + pub on_demand_queue_max_size: u32, + /// The target utilization of the spot price queue in percentages. + pub on_demand_target_queue_utilization: Perbill, + /// How quickly the fee rises in reaction to increased utilization. 
+ /// The lower the number the slower the increase. + pub on_demand_fee_variability: Perbill, + /// The minimum amount needed to claim a slot in the spot pricing queue. + pub on_demand_base_fee: Balance, + /// The number of blocks an on demand claim stays in the scheduler's claimqueue before getting cleared. + /// This number should go reasonably higher than the number of blocks in the async backing lookahead. + pub on_demand_ttl: BlockNumber, /// How often parachain groups should be rotated across parachains. /// /// Must be non-zero. pub group_rotation_frequency: BlockNumber, - /// The availability period, in blocks, for parachains. This is the amount of blocks + /// The availability period, in blocks. This is the amount of blocks /// after inclusion that validators have to make the block available and signal its availability to /// the chain. /// /// Must be at least 1. - pub chain_availability_period: BlockNumber, - /// The availability period, in blocks, for parathreads. Same as the `chain_availability_period`, - /// but a differing timeout due to differing requirements. - /// - /// Must be at least 1. - pub thread_availability_period: BlockNumber, - /// The amount of blocks ahead to schedule parachains and parathreads. + pub paras_availability_period: BlockNumber, + /// The amount of blocks ahead to schedule paras. pub scheduling_lookahead: u32, /// The maximum number of validators to have per core. /// @@ -236,8 +239,7 @@ pub struct HostConfiguration { /// To prevent that, we introduce the minimum number of blocks after which the upgrade can be /// scheduled. This number is controlled by this field. /// - /// This value should be greater than [`chain_availability_period`] and - /// [`thread_availability_period`]. + /// This value should be greater than [`paras_availability_period`]. 
pub minimum_validation_upgrade_delay: BlockNumber, } @@ -249,8 +251,7 @@ impl> Default for HostConfiguration> Default for HostConfiguration> Default for HostConfiguration> Default for HostConfiguration { /// `group_rotation_frequency` is set to zero. ZeroGroupRotationFrequency, - /// `chain_availability_period` is set to zero. - ZeroChainAvailabilityPeriod, - /// `thread_availability_period` is set to zero. - ZeroThreadAvailabilityPeriod, + /// `paras_availability_period` is set to zero. + ZeroParasAvailabilityPeriod, /// `no_show_slots` is set to zero. ZeroNoShowSlots, /// `max_code_size` exceeds the hard limit of `MAX_CODE_SIZE`. @@ -308,15 +310,10 @@ pub enum InconsistentError { MaxHeadDataSizeExceedHardLimit { max_head_data_size: u32 }, /// `max_pov_size` exceeds the hard limit of `MAX_POV_SIZE`. MaxPovSizeExceedHardLimit { max_pov_size: u32 }, - /// `minimum_validation_upgrade_delay` is less than `chain_availability_period`. + /// `minimum_validation_upgrade_delay` is less than `paras_availability_period`. MinimumValidationUpgradeDelayLessThanChainAvailabilityPeriod { minimum_validation_upgrade_delay: BlockNumber, - chain_availability_period: BlockNumber, - }, - /// `minimum_validation_upgrade_delay` is less than `thread_availability_period`. - MinimumValidationUpgradeDelayLessThanThreadAvailabilityPeriod { - minimum_validation_upgrade_delay: BlockNumber, - thread_availability_period: BlockNumber, + paras_availability_period: BlockNumber, }, /// `validation_upgrade_delay` is less than or equal 1. 
ValidationUpgradeDelayIsTooLow { validation_upgrade_delay: BlockNumber }, @@ -348,12 +345,8 @@ where return Err(ZeroGroupRotationFrequency) } - if self.chain_availability_period.is_zero() { - return Err(ZeroChainAvailabilityPeriod) - } - - if self.thread_availability_period.is_zero() { - return Err(ZeroThreadAvailabilityPeriod) + if self.paras_availability_period.is_zero() { + return Err(ZeroParasAvailabilityPeriod) } if self.no_show_slots.is_zero() { @@ -374,15 +367,10 @@ where return Err(MaxPovSizeExceedHardLimit { max_pov_size: self.max_pov_size }) } - if self.minimum_validation_upgrade_delay <= self.chain_availability_period { + if self.minimum_validation_upgrade_delay <= self.paras_availability_period { return Err(MinimumValidationUpgradeDelayLessThanChainAvailabilityPeriod { minimum_validation_upgrade_delay: self.minimum_validation_upgrade_delay.clone(), - chain_availability_period: self.chain_availability_period.clone(), - }) - } else if self.minimum_validation_upgrade_delay <= self.thread_availability_period { - return Err(MinimumValidationUpgradeDelayLessThanThreadAvailabilityPeriod { - minimum_validation_upgrade_delay: self.minimum_validation_upgrade_delay.clone(), - thread_availability_period: self.thread_availability_period.clone(), + paras_availability_period: self.paras_availability_period.clone(), }) } @@ -441,6 +429,7 @@ pub trait WeightInfo { fn set_config_with_balance() -> Weight; fn set_hrmp_open_request_ttl() -> Weight; fn set_config_with_executor_params() -> Weight; + fn set_config_with_perbill() -> Weight; } pub struct TestWeightInfo; @@ -463,6 +452,9 @@ impl WeightInfo for TestWeightInfo { fn set_config_with_executor_params() -> Weight { Weight::MAX } + fn set_config_with_perbill() -> Weight { + Weight::MAX + } } #[frame_support::pallet] @@ -625,29 +617,29 @@ pub mod pallet { }) } - /// Set the number of parathread execution cores. + /// Set the number of on demand execution cores. 
#[pallet::call_index(6)] #[pallet::weight(( T::WeightInfo::set_config_with_u32(), DispatchClass::Operational, ))] - pub fn set_parathread_cores(origin: OriginFor, new: u32) -> DispatchResult { + pub fn set_on_demand_cores(origin: OriginFor, new: u32) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.parathread_cores = new; + config.on_demand_cores = new; }) } - /// Set the number of retries for a particular parathread. + /// Set the number of retries for a particular on demand. #[pallet::call_index(7)] #[pallet::weight(( T::WeightInfo::set_config_with_u32(), DispatchClass::Operational, ))] - pub fn set_parathread_retries(origin: OriginFor, new: u32) -> DispatchResult { + pub fn set_on_demand_retries(origin: OriginFor, new: u32) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.parathread_retries = new; + config.on_demand_retries = new; }) } @@ -667,35 +659,19 @@ pub mod pallet { }) } - /// Set the availability period for parachains. + /// Set the availability period for paras. #[pallet::call_index(9)] #[pallet::weight(( T::WeightInfo::set_config_with_block_number(), DispatchClass::Operational, ))] - pub fn set_chain_availability_period( - origin: OriginFor, - new: BlockNumberFor, - ) -> DispatchResult { - ensure_root(origin)?; - Self::schedule_config_update(|config| { - config.chain_availability_period = new; - }) - } - - /// Set the availability period for parathreads. 
- #[pallet::call_index(10)] - #[pallet::weight(( - T::WeightInfo::set_config_with_block_number(), - DispatchClass::Operational, - ))] - pub fn set_thread_availability_period( + pub fn set_paras_availability_period( origin: OriginFor, new: BlockNumberFor, ) -> DispatchResult { ensure_root(origin)?; Self::schedule_config_update(|config| { - config.thread_availability_period = new; + config.paras_availability_period = new; }) } @@ -987,22 +963,6 @@ pub mod pallet { }) } - /// Sets the maximum number of inbound HRMP channels a parathread is allowed to accept. - #[pallet::call_index(35)] - #[pallet::weight(( - T::WeightInfo::set_config_with_u32(), - DispatchClass::Operational, - ))] - pub fn set_hrmp_max_parathread_inbound_channels( - origin: OriginFor, - new: u32, - ) -> DispatchResult { - ensure_root(origin)?; - Self::schedule_config_update(|config| { - config.hrmp_max_parathread_inbound_channels = new; - }) - } - /// Sets the maximum size of a message that could ever be put into an HRMP channel. #[pallet::call_index(36)] #[pallet::weight(( @@ -1032,22 +992,6 @@ pub mod pallet { }) } - /// Sets the maximum number of outbound HRMP channels a parathread is allowed to open. - #[pallet::call_index(38)] - #[pallet::weight(( - T::WeightInfo::set_config_with_u32(), - DispatchClass::Operational, - ))] - pub fn set_hrmp_max_parathread_outbound_channels( - origin: OriginFor, - new: u32, - ) -> DispatchResult { - ensure_root(origin)?; - Self::schedule_config_update(|config| { - config.hrmp_max_parathread_outbound_channels = new; - }) - } - /// Sets the maximum number of outbound HRMP messages can be sent by a candidate. #[pallet::call_index(39)] #[pallet::weight(( @@ -1137,6 +1081,72 @@ pub mod pallet { config.executor_params = new; }) } + + /// Set the on demand (parathreads) base fee. 
+ #[pallet::call_index(47)] + #[pallet::weight(( + T::WeightInfo::set_config_with_balance(), + DispatchClass::Operational, + ))] + pub fn set_on_demand_base_fee(origin: OriginFor, new: Balance) -> DispatchResult { + ensure_root(origin)?; + Self::schedule_config_update(|config| { + config.on_demand_base_fee = new; + }) + } + + /// Set the on demand (parathreads) fee variability. + #[pallet::call_index(48)] + #[pallet::weight(( + T::WeightInfo::set_config_with_perbill(), + DispatchClass::Operational, + ))] + pub fn set_on_demand_fee_variability(origin: OriginFor, new: Perbill) -> DispatchResult { + ensure_root(origin)?; + Self::schedule_config_update(|config| { + config.on_demand_fee_variability = new; + }) + } + + /// Set the on demand (parathreads) queue max size. + #[pallet::call_index(49)] + #[pallet::weight(( + T::WeightInfo::set_config_with_option_u32(), + DispatchClass::Operational, + ))] + pub fn set_on_demand_queue_max_size(origin: OriginFor, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::schedule_config_update(|config| { + config.on_demand_queue_max_size = new; + }) + } + /// Set the on demand (parathreads) fee variability. + #[pallet::call_index(50)] + #[pallet::weight(( + T::WeightInfo::set_config_with_perbill(), + DispatchClass::Operational, + ))] + pub fn set_on_demand_target_queue_utilization( + origin: OriginFor, + new: Perbill, + ) -> DispatchResult { + ensure_root(origin)?; + Self::schedule_config_update(|config| { + config.on_demand_target_queue_utilization = new; + }) + } + /// Set the on demand (parathreads) ttl in the claimqueue. 
+ #[pallet::call_index(51)] + #[pallet::weight(( + T::WeightInfo::set_config_with_block_number(), + DispatchClass::Operational + ))] + pub fn set_on_demand_ttl(origin: OriginFor, new: BlockNumberFor) -> DispatchResult { + ensure_root(origin)?; + Self::schedule_config_update(|config| { + config.on_demand_ttl = new; + }) + } } #[pallet::hooks] diff --git a/runtime/parachains/src/configuration/benchmarking.rs b/runtime/parachains/src/configuration/benchmarking.rs index ef8fafd91c96..d9d11ab56e49 100644 --- a/runtime/parachains/src/configuration/benchmarking.rs +++ b/runtime/parachains/src/configuration/benchmarking.rs @@ -47,6 +47,8 @@ benchmarks! { ExecutorParam::PvfExecTimeout(PvfExecTimeoutKind::Approval, 12_000), ][..])) + set_config_with_perbill {}: set_on_demand_fee_variability(RawOrigin::Root, Perbill::from_percent(100)) + impl_benchmark_test_suite!( Pallet, crate::mock::new_test_ext(Default::default()), diff --git a/runtime/parachains/src/configuration/migration/v7.rs b/runtime/parachains/src/configuration/migration/v7.rs index cdff80a31a3a..c7e9b9b01eb3 100644 --- a/runtime/parachains/src/configuration/migration/v7.rs +++ b/runtime/parachains/src/configuration/migration/v7.rs @@ -24,6 +24,7 @@ use frame_support::{ }; use frame_system::pallet_prelude::BlockNumberFor; use primitives::SessionIndex; +use sp_runtime::Perbill; use sp_std::vec::Vec; use frame_support::traits::OnRuntimeUpgrade; @@ -118,21 +119,18 @@ validation_upgrade_cooldown : pre.validation_upgrade_cooldown, validation_upgrade_delay : pre.validation_upgrade_delay, max_pov_size : pre.max_pov_size, max_downward_message_size : pre.max_downward_message_size, -hrmp_max_parachain_outbound_channels : pre.hrmp_max_parachain_outbound_channels, -hrmp_max_parathread_outbound_channels : pre.hrmp_max_parathread_outbound_channels, hrmp_sender_deposit : pre.hrmp_sender_deposit, hrmp_recipient_deposit : pre.hrmp_recipient_deposit, hrmp_channel_max_capacity : pre.hrmp_channel_max_capacity, 
hrmp_channel_max_total_size : pre.hrmp_channel_max_total_size, hrmp_max_parachain_inbound_channels : pre.hrmp_max_parachain_inbound_channels, -hrmp_max_parathread_inbound_channels : pre.hrmp_max_parathread_inbound_channels, +hrmp_max_parachain_outbound_channels : pre.hrmp_max_parachain_outbound_channels, hrmp_channel_max_message_size : pre.hrmp_channel_max_message_size, code_retention_period : pre.code_retention_period, -parathread_cores : pre.parathread_cores, -parathread_retries : pre.parathread_retries, +on_demand_cores : pre.parathread_cores, +on_demand_retries : pre.parathread_retries, group_rotation_frequency : pre.group_rotation_frequency, -chain_availability_period : pre.chain_availability_period, -thread_availability_period : pre.thread_availability_period, +paras_availability_period : pre.chain_availability_period, scheduling_lookahead : pre.scheduling_lookahead, max_validators_per_core : pre.max_validators_per_core, max_validators : pre.max_validators, @@ -147,6 +145,11 @@ pvf_voting_ttl : pre.pvf_voting_ttl, minimum_validation_upgrade_delay : pre.minimum_validation_upgrade_delay, async_backing_params : pre.async_backing_params, executor_params : pre.executor_params, +on_demand_queue_max_size : 10_000u32, +on_demand_base_fee : 10_000_000u128, +on_demand_fee_variability : Perbill::from_percent(3), +on_demand_target_queue_utilization : Perbill::from_percent(25), +on_demand_ttl : 5u32.into(), } }; @@ -258,20 +261,17 @@ mod tests { assert_eq!(v6.max_pov_size , v7.max_pov_size); assert_eq!(v6.max_downward_message_size , v7.max_downward_message_size); assert_eq!(v6.hrmp_max_parachain_outbound_channels , v7.hrmp_max_parachain_outbound_channels); - assert_eq!(v6.hrmp_max_parathread_outbound_channels , v7.hrmp_max_parathread_outbound_channels); assert_eq!(v6.hrmp_sender_deposit , v7.hrmp_sender_deposit); assert_eq!(v6.hrmp_recipient_deposit , v7.hrmp_recipient_deposit); assert_eq!(v6.hrmp_channel_max_capacity , v7.hrmp_channel_max_capacity); 
assert_eq!(v6.hrmp_channel_max_total_size , v7.hrmp_channel_max_total_size); assert_eq!(v6.hrmp_max_parachain_inbound_channels , v7.hrmp_max_parachain_inbound_channels); - assert_eq!(v6.hrmp_max_parathread_inbound_channels , v7.hrmp_max_parathread_inbound_channels); assert_eq!(v6.hrmp_channel_max_message_size , v7.hrmp_channel_max_message_size); assert_eq!(v6.code_retention_period , v7.code_retention_period); - assert_eq!(v6.parathread_cores , v7.parathread_cores); - assert_eq!(v6.parathread_retries , v7.parathread_retries); + assert_eq!(v6.on_demand_cores , v7.on_demand_cores); + assert_eq!(v6.on_demand_retries , v7.on_demand_retries); assert_eq!(v6.group_rotation_frequency , v7.group_rotation_frequency); - assert_eq!(v6.chain_availability_period , v7.chain_availability_period); - assert_eq!(v6.thread_availability_period , v7.thread_availability_period); + assert_eq!(v6.paras_availability_period , v7.paras_availability_period); assert_eq!(v6.scheduling_lookahead , v7.scheduling_lookahead); assert_eq!(v6.max_validators_per_core , v7.max_validators_per_core); assert_eq!(v6.max_validators , v7.max_validators); diff --git a/runtime/parachains/src/configuration/tests.rs b/runtime/parachains/src/configuration/tests.rs index 0c2b5a779cb5..b2a81894a939 100644 --- a/runtime/parachains/src/configuration/tests.rs +++ b/runtime/parachains/src/configuration/tests.rs @@ -216,11 +216,7 @@ fn invariants() { ); assert_err!( - Configuration::set_chain_availability_period(RuntimeOrigin::root(), 0), - Error::::InvalidNewValue - ); - assert_err!( - Configuration::set_thread_availability_period(RuntimeOrigin::root(), 0), + Configuration::set_paras_availability_period(RuntimeOrigin::root(), 0), Error::::InvalidNewValue ); assert_err!( @@ -229,17 +225,12 @@ fn invariants() { ); ActiveConfig::::put(HostConfiguration { - chain_availability_period: 10, - thread_availability_period: 8, + paras_availability_period: 10, minimum_validation_upgrade_delay: 11, ..Default::default() }); 
assert_err!( - Configuration::set_chain_availability_period(RuntimeOrigin::root(), 12), - Error::::InvalidNewValue - ); - assert_err!( - Configuration::set_thread_availability_period(RuntimeOrigin::root(), 12), + Configuration::set_paras_availability_period(RuntimeOrigin::root(), 12), Error::::InvalidNewValue ); assert_err!( @@ -291,11 +282,10 @@ fn setting_pending_config_members() { max_code_size: 100_000, max_pov_size: 1024, max_head_data_size: 1_000, - parathread_cores: 2, - parathread_retries: 5, + on_demand_cores: 2, + on_demand_retries: 5, group_rotation_frequency: 20, - chain_availability_period: 10, - thread_availability_period: 8, + paras_availability_period: 10, scheduling_lookahead: 3, max_validators_per_core: None, max_validators: None, @@ -316,14 +306,17 @@ fn setting_pending_config_members() { hrmp_channel_max_capacity: 3921, hrmp_channel_max_total_size: 7687, hrmp_max_parachain_inbound_channels: 37, - hrmp_max_parathread_inbound_channels: 19, hrmp_channel_max_message_size: 8192, hrmp_max_parachain_outbound_channels: 10, - hrmp_max_parathread_outbound_channels: 20, hrmp_max_message_num_per_candidate: 20, pvf_voting_ttl: 3, minimum_validation_upgrade_delay: 20, executor_params: Default::default(), + on_demand_queue_max_size: 10_000u32, + on_demand_base_fee: 10_000_000u128, + on_demand_fee_variability: Perbill::from_percent(3), + on_demand_target_queue_utilization: Perbill::from_percent(25), + on_demand_ttl: 5u32, }; Configuration::set_validation_upgrade_cooldown( @@ -345,9 +338,9 @@ fn setting_pending_config_members() { Configuration::set_max_pov_size(RuntimeOrigin::root(), new_config.max_pov_size).unwrap(); Configuration::set_max_head_data_size(RuntimeOrigin::root(), new_config.max_head_data_size) .unwrap(); - Configuration::set_parathread_cores(RuntimeOrigin::root(), new_config.parathread_cores) + Configuration::set_on_demand_cores(RuntimeOrigin::root(), new_config.on_demand_cores) .unwrap(); - 
Configuration::set_parathread_retries(RuntimeOrigin::root(), new_config.parathread_retries) + Configuration::set_on_demand_retries(RuntimeOrigin::root(), new_config.on_demand_retries) .unwrap(); Configuration::set_group_rotation_frequency( RuntimeOrigin::root(), @@ -361,14 +354,9 @@ fn setting_pending_config_members() { new_config.minimum_validation_upgrade_delay, ) .unwrap(); - Configuration::set_chain_availability_period( + Configuration::set_paras_availability_period( RuntimeOrigin::root(), - new_config.chain_availability_period, - ) - .unwrap(); - Configuration::set_thread_availability_period( - RuntimeOrigin::root(), - new_config.thread_availability_period, + new_config.paras_availability_period, ) .unwrap(); Configuration::set_scheduling_lookahead( @@ -462,11 +450,6 @@ fn setting_pending_config_members() { new_config.hrmp_max_parachain_inbound_channels, ) .unwrap(); - Configuration::set_hrmp_max_parathread_inbound_channels( - RuntimeOrigin::root(), - new_config.hrmp_max_parathread_inbound_channels, - ) - .unwrap(); Configuration::set_hrmp_channel_max_message_size( RuntimeOrigin::root(), new_config.hrmp_channel_max_message_size, @@ -477,11 +460,6 @@ fn setting_pending_config_members() { new_config.hrmp_max_parachain_outbound_channels, ) .unwrap(); - Configuration::set_hrmp_max_parathread_outbound_channels( - RuntimeOrigin::root(), - new_config.hrmp_max_parathread_outbound_channels, - ) - .unwrap(); Configuration::set_hrmp_max_message_num_per_candidate( RuntimeOrigin::root(), new_config.hrmp_max_message_num_per_candidate, diff --git a/runtime/parachains/src/hrmp.rs b/runtime/parachains/src/hrmp.rs index c876749e853d..18c4b0d21896 100644 --- a/runtime/parachains/src/hrmp.rs +++ b/runtime/parachains/src/hrmp.rs @@ -1183,11 +1183,7 @@ impl Pallet { let egress_cnt = HrmpEgressChannelsIndex::::decode_len(&origin).unwrap_or(0) as u32; let open_req_cnt = HrmpOpenChannelRequestCount::::get(&origin); - let channel_num_limit = if >::is_parathread(origin) { - 
config.hrmp_max_parathread_outbound_channels - } else { - config.hrmp_max_parachain_outbound_channels - }; + let channel_num_limit = config.hrmp_max_parachain_outbound_channels; ensure!( egress_cnt + open_req_cnt < channel_num_limit, Error::::OpenHrmpChannelLimitExceeded, @@ -1253,11 +1249,7 @@ impl Pallet { // check if by accepting this open channel request, this parachain would exceed the // number of inbound channels. let config = >::config(); - let channel_num_limit = if >::is_parathread(origin) { - config.hrmp_max_parathread_inbound_channels - } else { - config.hrmp_max_parachain_inbound_channels - }; + let channel_num_limit = config.hrmp_max_parachain_inbound_channels; let ingress_cnt = HrmpIngressChannelsIndex::::decode_len(&origin).unwrap_or(0) as u32; let accepted_cnt = HrmpAcceptedChannelRequestCount::::get(&origin); ensure!( diff --git a/runtime/parachains/src/hrmp/tests.rs b/runtime/parachains/src/hrmp/tests.rs index 8b9fd7136a13..8cfaf48d10ef 100644 --- a/runtime/parachains/src/hrmp/tests.rs +++ b/runtime/parachains/src/hrmp/tests.rs @@ -69,10 +69,8 @@ pub(crate) fn run_to_block(to: BlockNumber, new_session: Option pub(super) struct GenesisConfigBuilder { hrmp_channel_max_capacity: u32, hrmp_channel_max_message_size: u32, - hrmp_max_parathread_outbound_channels: u32, - hrmp_max_parachain_outbound_channels: u32, - hrmp_max_parathread_inbound_channels: u32, - hrmp_max_parachain_inbound_channels: u32, + hrmp_max_paras_outbound_channels: u32, + hrmp_max_paras_inbound_channels: u32, hrmp_max_message_num_per_candidate: u32, hrmp_channel_max_total_size: u32, hrmp_sender_deposit: Balance, @@ -84,10 +82,8 @@ impl Default for GenesisConfigBuilder { Self { hrmp_channel_max_capacity: 2, hrmp_channel_max_message_size: 8, - hrmp_max_parathread_outbound_channels: 1, - hrmp_max_parachain_outbound_channels: 2, - hrmp_max_parathread_inbound_channels: 1, - hrmp_max_parachain_inbound_channels: 2, + hrmp_max_paras_outbound_channels: 2, + hrmp_max_paras_inbound_channels: 2, 
hrmp_max_message_num_per_candidate: 2, hrmp_channel_max_total_size: 16, hrmp_sender_deposit: 100, @@ -102,10 +98,8 @@ impl GenesisConfigBuilder { let config = &mut genesis.configuration.config; config.hrmp_channel_max_capacity = self.hrmp_channel_max_capacity; config.hrmp_channel_max_message_size = self.hrmp_channel_max_message_size; - config.hrmp_max_parathread_outbound_channels = self.hrmp_max_parathread_outbound_channels; - config.hrmp_max_parachain_outbound_channels = self.hrmp_max_parachain_outbound_channels; - config.hrmp_max_parathread_inbound_channels = self.hrmp_max_parathread_inbound_channels; - config.hrmp_max_parachain_inbound_channels = self.hrmp_max_parachain_inbound_channels; + config.hrmp_max_parachain_outbound_channels = self.hrmp_max_paras_outbound_channels; + config.hrmp_max_parachain_inbound_channels = self.hrmp_max_paras_inbound_channels; config.hrmp_max_message_num_per_candidate = self.hrmp_max_message_num_per_candidate; config.hrmp_channel_max_total_size = self.hrmp_channel_max_total_size; config.hrmp_sender_deposit = self.hrmp_sender_deposit; diff --git a/runtime/parachains/src/inclusion/mod.rs b/runtime/parachains/src/inclusion/mod.rs index a547c5a0629e..fb0da50d670d 100644 --- a/runtime/parachains/src/inclusion/mod.rs +++ b/runtime/parachains/src/inclusion/mod.rs @@ -322,8 +322,6 @@ pub mod pallet { UnscheduledCandidate, /// Candidate scheduled despite pending candidate already existing for the para. CandidateScheduledBeforeParaFree, - /// Candidate included with the wrong collator. - WrongCollator, /// Scheduled cores out of order. ScheduledOutOfOrder, /// Head data exceeds the configured maximum. 
@@ -599,7 +597,7 @@ impl Pallet { pub(crate) fn process_candidates( parent_storage_root: T::Hash, candidates: Vec>, - scheduled: Vec, + scheduled: Vec>>, group_validators: GV, ) -> Result, DispatchError> where @@ -630,15 +628,16 @@ impl Pallet { let mut core_indices_and_backers = Vec::with_capacity(candidates.len()); let mut last_core = None; - let mut check_assignment_in_order = |assignment: &CoreAssignment| -> DispatchResult { - ensure!( - last_core.map_or(true, |core| assignment.core > core), - Error::::ScheduledOutOfOrder, - ); + let mut check_assignment_in_order = + |assignment: &CoreAssignment>| -> DispatchResult { + ensure!( + last_core.map_or(true, |core| assignment.core > core), + Error::::ScheduledOutOfOrder, + ); - last_core = Some(assignment.core); - Ok(()) - }; + last_core = Some(assignment.core); + Ok(()) + }; let signing_context = SigningContext { parent_hash, session_index: shared::Pallet::::session_index() }; @@ -680,17 +679,10 @@ impl Pallet { let para_id = backed_candidate.descriptor().para_id; let mut backers = bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; - for (i, assignment) in scheduled[skip..].iter().enumerate() { - check_assignment_in_order(assignment)?; - - if para_id == assignment.kind.para_id() { - if let Some(required_collator) = assignment.required_collator() { - ensure!( - required_collator == &backed_candidate.descriptor().collator, - Error::::WrongCollator, - ); - } + for (i, core_assignment) in scheduled[skip..].iter().enumerate() { + check_assignment_in_order(core_assignment)?; + if para_id == core_assignment.paras_entry.para_id() { ensure!( >::get(¶_id).is_none() && >::get(¶_id).is_none(), @@ -700,7 +692,7 @@ impl Pallet { // account for already skipped, and then skip this one. 
skip = i + skip + 1; - let group_vals = group_validators(assignment.group_idx) + let group_vals = group_validators(core_assignment.group_idx) .ok_or_else(|| Error::::InvalidGroupIndex)?; // check the signatures in the backing and that it is a majority. @@ -752,9 +744,9 @@ impl Pallet { } core_indices_and_backers.push(( - (assignment.core, assignment.kind.para_id()), + (core_assignment.core, core_assignment.paras_entry.para_id()), backers, - assignment.group_idx, + core_assignment.group_idx, )); continue 'next_backed_candidate } diff --git a/runtime/parachains/src/inclusion/tests.rs b/runtime/parachains/src/inclusion/tests.rs index ad638f3e0562..0f9ac35a2835 100644 --- a/runtime/parachains/src/inclusion/tests.rs +++ b/runtime/parachains/src/inclusion/tests.rs @@ -24,7 +24,6 @@ use crate::{ }, paras::{ParaGenesisArgs, ParaKind}, paras_inherent::DisputedBitfield, - scheduler_common::Assignment, }; use primitives::{SignedAvailabilityBitfields, UncheckedSignedAvailabilityBitfields}; @@ -33,10 +32,10 @@ use frame_support::assert_noop; use keyring::Sr25519Keyring; use parity_scale_codec::DecodeAll; use primitives::{ + v5::{Assignment, ParasEntry}, BlockNumber, CandidateCommitments, CandidateDescriptor, CollatorId, - CompactStatement as Statement, Hash, ParathreadClaim, ParathreadEntry, - SignedAvailabilityBitfield, SignedStatement, ValidationCode, ValidatorId, ValidityAttestation, - PARACHAIN_KEY_TYPE_ID, + CompactStatement as Statement, Hash, SignedAvailabilityBitfield, SignedStatement, + ValidationCode, ValidatorId, ValidityAttestation, PARACHAIN_KEY_TYPE_ID, }; use sc_keystore::LocalKeystore; use sp_keystore::{Keystore, KeystorePtr}; @@ -45,7 +44,7 @@ use test_helpers::{dummy_collator, dummy_collator_signature, dummy_validation_co fn default_config() -> HostConfiguration { let mut config = HostConfiguration::default(); - config.parathread_cores = 1; + config.on_demand_cores = 1; config.max_code_size = 0b100000; config.max_head_data_size = 0b100000; config @@ -205,7 
+204,7 @@ pub(crate) fn run_to_block( } pub(crate) fn expected_bits() -> usize { - Paras::parachains().len() + Configuration::config().parathread_cores as usize + Paras::parachains().len() + Configuration::config().on_demand_cores as usize } fn default_bitfield() -> AvailabilityBitfield { @@ -881,26 +880,23 @@ fn candidate_checks() { .map(|m| m.into_iter().map(ValidatorIndex).collect::>()) }; + let entry_ttl = 10_000; let thread_collator: CollatorId = Sr25519Keyring::Two.public().into(); - let chain_a_assignment = CoreAssignment { core: CoreIndex::from(0), - kind: Assignment::Parachain(chain_a), + paras_entry: ParasEntry::new(Assignment::new(chain_a), entry_ttl), group_idx: GroupIndex::from(0), }; let chain_b_assignment = CoreAssignment { core: CoreIndex::from(1), - kind: Assignment::Parachain(chain_b), + paras_entry: ParasEntry::new(Assignment::new(chain_b), entry_ttl), group_idx: GroupIndex::from(1), }; let thread_a_assignment = CoreAssignment { core: CoreIndex::from(2), - kind: Assignment::ParathreadA(ParathreadEntry { - claim: ParathreadClaim(thread_a, Some(thread_collator.clone())), - retries: 0, - }), + paras_entry: ParasEntry::new(Assignment::new(thread_a), entry_ttl), group_idx: GroupIndex::from(2), }; @@ -1060,44 +1056,46 @@ fn candidate_checks() { ); } + // TODO: Will be tested at a higher level + // // candidate has wrong collator. 
- { - let mut candidate = TestCandidateBuilder { - para_id: thread_a, - relay_parent: System::parent_hash(), - pov_hash: Hash::repeat_byte(1), - persisted_validation_data_hash: make_vdata_hash(thread_a).unwrap(), - hrmp_watermark: RELAY_PARENT_NUM, - ..Default::default() - } - .build(); - - assert!(CollatorId::from(Sr25519Keyring::One.public()) != thread_collator); - collator_sign_candidate(Sr25519Keyring::One, &mut candidate); - - let backed = back_candidate( - candidate, - &validators, - group_validators(GroupIndex::from(2)).unwrap().as_ref(), - &keystore, - &signing_context, - BackingKind::Threshold, - ); - - assert_noop!( - ParaInclusion::process_candidates( - Default::default(), - vec![backed], - vec![ - chain_a_assignment.clone(), - chain_b_assignment.clone(), - thread_a_assignment.clone(), - ], - &group_validators, - ), - Error::::WrongCollator, - ); - } + //{ + // let mut candidate = TestCandidateBuilder { + // para_id: thread_a, + // relay_parent: System::parent_hash(), + // pov_hash: Hash::repeat_byte(1), + // persisted_validation_data_hash: make_vdata_hash(thread_a).unwrap(), + // hrmp_watermark: RELAY_PARENT_NUM, + // ..Default::default() + // } + // .build(); + + // assert!(CollatorId::from(Sr25519Keyring::One.public()) != thread_collator); + // collator_sign_candidate(Sr25519Keyring::One, &mut candidate); + + // let backed = back_candidate( + // candidate, + // &validators, + // group_validators(GroupIndex::from(2)).unwrap().as_ref(), + // &keystore, + // &signing_context, + // BackingKind::Threshold, + // ); + + // assert_noop!( + // ParaInclusion::process_candidates( + // Default::default(), + // vec![backed], + // vec![ + // chain_a_assignment.clone(), + // chain_b_assignment.clone(), + // thread_a_assignment.clone(), + // ], + // &group_validators, + // ), + // Error::::WrongPeerId, + // ); + //} // candidate not well-signed by collator. 
{ @@ -1428,26 +1426,23 @@ fn backing_works() { .map(|vs| vs.into_iter().map(ValidatorIndex).collect::>()) }; - let thread_collator: CollatorId = Sr25519Keyring::Two.public().into(); + let entry_ttl = 10_000; let chain_a_assignment = CoreAssignment { core: CoreIndex::from(0), - kind: Assignment::Parachain(chain_a), + paras_entry: ParasEntry::new(Assignment::new(chain_a), entry_ttl), group_idx: GroupIndex::from(0), }; let chain_b_assignment = CoreAssignment { core: CoreIndex::from(1), - kind: Assignment::Parachain(chain_b), + paras_entry: ParasEntry::new(Assignment::new(chain_b), entry_ttl), group_idx: GroupIndex::from(1), }; let thread_a_assignment = CoreAssignment { core: CoreIndex::from(2), - kind: Assignment::ParathreadA(ParathreadEntry { - claim: ParathreadClaim(thread_a, Some(thread_collator.clone())), - retries: 0, - }), + paras_entry: ParasEntry::new(Assignment::new(thread_a), entry_ttl), group_idx: GroupIndex::from(2), }; @@ -1511,7 +1506,7 @@ fn backing_works() { BackingKind::Threshold, ); - let backed_candidates = vec![backed_a, backed_b, backed_c]; + let backed_candidates = vec![backed_a.clone(), backed_b.clone(), backed_c]; let get_backing_group_idx = { // the order defines the group implicitly for this test case let backed_candidates_with_groups = backed_candidates @@ -1710,9 +1705,11 @@ fn can_include_candidate_with_ok_code_upgrade() { .map(|vs| vs.into_iter().map(ValidatorIndex).collect::>()) }; + let entry_ttl = 10_000; + let chain_a_assignment = CoreAssignment { core: CoreIndex::from(0), - kind: Assignment::Parachain(chain_a), + paras_entry: ParasEntry::new(Assignment::new(chain_a), entry_ttl), group_idx: GroupIndex::from(0), }; @@ -1965,8 +1962,11 @@ fn para_upgrade_delay_scheduled_from_inclusion() { let chain_a_assignment = CoreAssignment { core: CoreIndex::from(0), - para_id: chain_a, - kind: AssignmentKind::Parachain, + paras_entry: ParasEntry { + assignment: Assignment { para_id: chain_a }, + availability_timeouts: 0, + ttl: 5, + }, group_idx: 
GroupIndex::from(0), }; @@ -2000,7 +2000,7 @@ fn para_upgrade_delay_scheduled_from_inclusion() { ) .expect("candidates scheduled, in order, and backed"); - assert_eq!(occupied_cores, vec![CoreIndex::from(0)]); + assert_eq!(occupied_cores, vec![(CoreIndex::from(0), chain_a)]); // Run a couple of blocks before the inclusion. run_to_block(7, |_| None); diff --git a/runtime/parachains/src/initializer.rs b/runtime/parachains/src/initializer.rs index b9ecc3038ca2..a925ed7b9545 100644 --- a/runtime/parachains/src/initializer.rs +++ b/runtime/parachains/src/initializer.rs @@ -238,6 +238,9 @@ impl Pallet { buf }; + // inform about upcoming new session + scheduler::Pallet::::pre_new_session(); + let configuration::SessionChangeOutcome { prev_config, new_config } = configuration::Pallet::::initializer_on_new_session(&session_index); let new_config = new_config.unwrap_or_else(|| prev_config.clone()); diff --git a/runtime/parachains/src/lib.rs b/runtime/parachains/src/lib.rs index 9aadf6590fcd..5cafe725ea03 100644 --- a/runtime/parachains/src/lib.rs +++ b/runtime/parachains/src/lib.rs @@ -23,6 +23,9 @@ #![cfg_attr(feature = "runtime-benchmarks", recursion_limit = "256")] #![cfg_attr(not(feature = "std"), no_std)] +pub mod assigner; +pub mod assigner_on_demand; +pub mod assigner_parachains; pub mod configuration; pub mod disputes; pub mod dmp; @@ -36,8 +39,6 @@ pub mod paras_inherent; pub mod reward_points; pub mod scheduler; pub mod scheduler_common; -pub mod scheduler_parachains; -pub mod scheduler_polkadot; pub mod session_info; pub mod shared; diff --git a/runtime/parachains/src/mock.rs b/runtime/parachains/src/mock.rs index 6bc6196316a6..f978b6c3360e 100644 --- a/runtime/parachains/src/mock.rs +++ b/runtime/parachains/src/mock.rs @@ -17,11 +17,11 @@ //! Mocks for all the traits. 
use crate::{ - configuration, disputes, dmp, hrmp, + assigner, assigner_on_demand, assigner_parachains, configuration, disputes, dmp, hrmp, inclusion::{self, AggregateMessageOrigin, UmpQueueId}, initializer, origin, paras, paras::ParaKind, - paras_inherent, scheduler, scheduler_polkadot, session_info, shared, ParaId, + paras_inherent, scheduler, session_info, shared, ParaId, }; use frame_support::{ @@ -43,7 +43,7 @@ use sp_io::TestExternalities; use sp_runtime::{ traits::{AccountIdConversion, BlakeTwo256, IdentityLookup}, transaction_validity::TransactionPriority, - BuildStorage, Perbill, Permill, + BuildStorage, FixedU128, Perbill, Permill, }; use std::{cell::RefCell, collections::HashMap}; @@ -62,7 +62,9 @@ frame_support::construct_runtime!( ParaInclusion: inclusion, ParaInherent: paras_inherent, Scheduler: scheduler, - SchedulerPolkadot: scheduler_polkadot, + Assigner: assigner, + OnDemandAssigner: assigner_on_demand, + ParachainsAssigner: assigner_parachains, Initializer: initializer, Dmp: dmp, Hrmp: hrmp, @@ -282,10 +284,8 @@ impl crate::disputes::SlashingHandler for Test { fn initializer_on_new_session(_: SessionIndex) {} } -impl crate::scheduler_parachains::Config for Test {} -impl crate::scheduler_polkadot::Config for Test {} impl crate::scheduler::Config for Test { - type AssignmentProvider = crate::scheduler_polkadot::Pallet; + type AssignmentProvider = Assigner; } pub struct TestMessageQueueWeight; @@ -339,6 +339,24 @@ impl pallet_message_queue::Config for Test { type ServiceWeight = MessageQueueServiceWeight; } +impl assigner::Config for Test { + type ParachainsAssignmentProvider = ParachainsAssigner; + type OnDemandAssignmentProvider = OnDemandAssigner; +} + +impl assigner_parachains::Config for Test {} + +parameter_types! 
{ + pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1); +} + +impl assigner_on_demand::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type TrafficDefaultValue = OnDemandTrafficDefaultValue; + type WeightInfo = crate::assigner_on_demand::TestWeightInfo; +} + impl crate::inclusion::Config for Test { type WeightInfo = (); type RuntimeEvent = RuntimeEvent; diff --git a/runtime/parachains/src/paras/tests.rs b/runtime/parachains/src/paras/tests.rs index 2bf30bb273e5..4ff2a506f1fb 100644 --- a/runtime/parachains/src/paras/tests.rs +++ b/runtime/parachains/src/paras/tests.rs @@ -752,8 +752,7 @@ fn full_parachain_cleanup_storage() { minimum_validation_upgrade_delay: 2, // Those are not relevant to this test. However, HostConfiguration is still a // subject for the consistency check. - chain_availability_period: 1, - thread_availability_period: 1, + paras_availability_period: 1, ..Default::default() }, ..Default::default() diff --git a/runtime/parachains/src/paras_inherent/mod.rs b/runtime/parachains/src/paras_inherent/mod.rs index ce43aa30ee35..414f15237a44 100644 --- a/runtime/parachains/src/paras_inherent/mod.rs +++ b/runtime/parachains/src/paras_inherent/mod.rs @@ -897,7 +897,7 @@ fn sanitize_backed_candidates< relay_parent: T::Hash, mut backed_candidates: Vec>, mut candidate_has_concluded_invalid_dispute_or_is_invalid: F, - scheduled: &[CoreAssignment], + scheduled: &[CoreAssignment>], ) -> Vec> { // Remove any candidates that were concluded invalid. // This does not assume sorting. @@ -907,7 +907,7 @@ fn sanitize_backed_candidates< let scheduled_paras_to_core_idx = scheduled .into_iter() - .map(|core_assignment| (core_assignment.kind.para_id(), core_assignment.core)) + .map(|core_assignment| (core_assignment.paras_entry.para_id(), core_assignment.core)) .collect::>(); // Assure the backed candidate's `ParaId`'s core is free. 
diff --git a/runtime/parachains/src/paras_inherent/tests.rs b/runtime/parachains/src/paras_inherent/tests.rs index 5aba48c5deaf..d1ec31714c3d 100644 --- a/runtime/parachains/src/paras_inherent/tests.rs +++ b/runtime/parachains/src/paras_inherent/tests.rs @@ -15,7 +15,6 @@ // along with Polkadot. If not, see . use super::*; -use crate::scheduler_common; // In order to facilitate benchmarks as tests we have a benchmark feature gated `WeightInfo` impl // that uses 0 for all the weights. Because all the weights are 0, the tests that rely on @@ -73,7 +72,10 @@ mod enter { // becoming fully available, the backed candidates will not be filtered out in `create_inherent` and // will not cause `enter` to early. fn include_backed_candidates() { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + let config = MockGenesisConfig::default(); + assert!(config.configuration.config.scheduling_lookahead > 0); + + new_test_ext(config).execute_with(|| { let dispute_statements = BTreeMap::new(); let mut backed_and_concluding = BTreeMap::new(); @@ -606,7 +608,10 @@ mod enter { #[test] // Ensure that when a block is over weight due to disputes and bitfields, we filter. 
fn limit_candidates_over_weight_1() { - new_test_ext(MockGenesisConfig::default()).execute_with(|| { + let config = MockGenesisConfig::default(); + assert!(config.configuration.config.scheduling_lookahead > 0); + + new_test_ext(config).execute_with(|| { // Create the inherent data for this block let mut dispute_statements = BTreeMap::new(); // Control the number of statements per dispute to ensure we have enough space @@ -951,7 +956,10 @@ mod sanitizers { use crate::mock::Test; use keyring::Sr25519Keyring; - use primitives::PARACHAIN_KEY_TYPE_ID; + use primitives::{ + v5::{Assignment, ParasEntry}, + PARACHAIN_KEY_TYPE_ID, + }; use sc_keystore::LocalKeystore; use sp_keystore::{Keystore, KeystorePtr}; use std::sync::Arc; @@ -1223,12 +1231,16 @@ mod sanitizers { let has_concluded_invalid = |_idx: usize, _backed_candidate: &BackedCandidate| -> bool { false }; + let entry_ttl = 10_000; let scheduled = (0_usize..2) .into_iter() .map(|idx| { let core_idx = CoreIndex::from(idx as u32); let ca = CoreAssignment { - kind: scheduler_common::Assignment::Parachain(ParaId::from(1_u32 + idx as u32)), + paras_entry: ParasEntry::new( + Assignment::new(ParaId::from(1_u32 + idx as u32)), + entry_ttl, + ), group_idx: GroupIndex::from(idx as u32), core: core_idx, }; diff --git a/runtime/parachains/src/runtime_api_impl/v5.rs b/runtime/parachains/src/runtime_api_impl/v5.rs index 62c6b02d7763..9b3996fa2f53 100644 --- a/runtime/parachains/src/runtime_api_impl/v5.rs +++ b/runtime/parachains/src/runtime_api_impl/v5.rs @@ -27,8 +27,8 @@ use primitives::{ CoreIndex, CoreOccupied, CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCore, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - ScheduledCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, - ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + ScrapedOnChainVotes, SessionIndex, SessionInfo, 
ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; use sp_runtime::traits::One; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -97,9 +97,10 @@ pub fn availability_cores() -> Vec { - let pending_availability = >::pending_availability(para_id) - .expect("Occupied core always has pending availability; qed"); + CoreOccupied::Paras(entry) => { + let pending_availability = + >::pending_availability(entry.para_id()) + .expect("Occupied core always has pending availability; qed"); let backed_in_number = *pending_availability.backed_in_number(); CoreState::Occupied(OccupiedCore { @@ -107,31 +108,7 @@ pub fn availability_cores() -> Vec>::next_up_on_time_out(CoreIndex( - i as u32, - )), - availability: pending_availability.availability_votes().clone(), - group_responsible: group_responsible_for( - backed_in_number, - pending_availability.core_occupied(), - ), - candidate_hash: pending_availability.candidate_hash(), - candidate_descriptor: pending_availability.candidate_descriptor().clone(), - }) - }, - CoreOccupied::Parathread(p) => { - let para_id = p.claim.0; - let pending_availability = >::pending_availability(para_id) - .expect("Occupied core always has pending availability; qed"); - - let backed_in_number = *pending_availability.backed_in_number(); - CoreState::Occupied(OccupiedCore { - next_up_on_available: >::next_up_on_available(CoreIndex( - i as u32, - )), - occupied_since: backed_in_number, - time_out_at: time_out_at(backed_in_number, config.thread_availability_period), + time_out_at: time_out_at(backed_in_number, config.paras_availability_period), next_up_on_time_out: >::next_up_on_time_out(CoreIndex( i as u32, )), @@ -149,10 +126,10 @@ pub fn availability_cores() -> Vec>::scheduled_claimqueue() { - core_states[scheduled.core.0 as usize] = CoreState::Scheduled(ScheduledCore { - para_id: scheduled.kind.para_id(), - collator: scheduled.required_collator().map(|c| c.clone()), + for scheduled in 
>::scheduled_claimqueue(now) { + core_states[scheduled.core.0 as usize] = CoreState::Scheduled(primitives::ScheduledCore { + para_id: scheduled.paras_entry.para_id(), + collator: None, }); } diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs index 9d37ab1c6090..20a059dca471 100644 --- a/runtime/parachains/src/scheduler.rs +++ b/runtime/parachains/src/scheduler.rs @@ -38,7 +38,7 @@ use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ - CoreIndex, CoreOccupied, GroupIndex, GroupRotationInfo, Id as ParaId, ParathreadEntry, + v5::ParasEntry, CoreIndex, CoreOccupied, GroupIndex, GroupRotationInfo, Id as ParaId, ScheduledCore, ValidatorIndex, }; use sp_runtime::traits::{One, Saturating}; @@ -51,18 +51,15 @@ use crate::{ configuration, initializer::SessionChangeNotification, paras, - scheduler_common::{CoreAssignment, FreedReason}, + scheduler_common::{AssignmentProvider, CoreAssignment, FreedReason}, }; -use crate::scheduler_common::{Assignment, AssignmentProvider}; pub use pallet::*; #[cfg(test)] mod tests; -/// The current storage version -const STORAGE_VERSION: StorageVersion = StorageVersion::new(0); -const LOG_TARGET: &str = "runtime::scheduler"; +const LOG_TARGET: &str = "runtime::parachains::scheduler"; pub mod migration; #[frame_support::pallet] @@ -70,6 +67,8 @@ pub mod pallet { use super::*; use crate::scheduler_common::AssignmentProvider; + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + #[pallet::pallet] #[pallet::without_storage_info] #[pallet::storage_version(STORAGE_VERSION)] @@ -77,7 +76,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + configuration::Config + paras::Config { - type AssignmentProvider: AssignmentProvider; + type AssignmentProvider: AssignmentProvider>; } /// All the validator groups. One for each core. 
Indices are into `ActiveValidators` - not the @@ -100,7 +99,8 @@ pub mod pallet { /// * The number of validators divided by `configuration.max_validators_per_core`. #[pallet::storage] #[pallet::getter(fn availability_cores)] - pub(crate) type AvailabilityCores = StorageValue<_, Vec, ValueQuery>; + pub(crate) type AvailabilityCores = + StorageValue<_, Vec>>, ValueQuery>; /// The block number where the session start occurred. Used to track how many group rotations have occurred. /// @@ -113,14 +113,21 @@ pub mod pallet { pub(crate) type SessionStartBlock = StorageValue<_, BlockNumberFor, ValueQuery>; /// One entry for each availability core. The `VecDeque` represents the assignments to be scheduled on that core. + /// `None` is used to signal to not schedule the next para of the core as there is one currently being scheduled. + /// Not using `None` here would overwrite the `CoreState` in the runtime API. + + // TODO: this behaviour is likely going to change when adapting for AB #[pallet::storage] #[pallet::getter(fn claimqueue)] - pub(crate) type ClaimQueue = - StorageValue<_, BTreeMap>>, ValueQuery>; + pub(crate) type ClaimQueue = StorageValue< + _, + BTreeMap>>>>, + ValueQuery, + >; } type PositionInClaimqueue = u32; -type TimedoutParas = BTreeMap; +type TimedoutParas = BTreeMap>>; type ConcludedParas = BTreeMap; impl Pallet { @@ -132,6 +139,12 @@ impl Pallet { /// Called by the initializer to finalize the scheduler pallet. pub(crate) fn initializer_finalize() {} + /// Called before the initializer notifies of a new session. + pub(crate) fn pre_new_session() { + Self::push_claimqueue_items_to_assignment_provider(); + Self::push_occupied_cores_to_assignment_provider(); + } + /// Called by the initializer to note that a new session has started. 
pub(crate) fn initializer_on_new_session( notification: &SessionChangeNotification>, @@ -147,16 +160,7 @@ impl Pallet { }, ); - Self::reschedule_occupied_cores(AvailabilityCores::::get()); - Self::clear_claimqueue(); - T::AssignmentProvider::new_session(); - - // clear all cores AvailabilityCores::::mutate(|cores| { - for core in cores.iter_mut() { - *core = CoreOccupied::Free; - } - cores.resize(n_cores as _, CoreOccupied::Free); }); @@ -199,11 +203,12 @@ impl Pallet { } /// Free unassigned cores. Provide a list of cores that should be considered newly-freed along with the reason - /// for them being freed. + /// for them being freed. Returns a tuple of concluded and timedout paras. fn free_cores( - just_freed_cores: BTreeMap, - ) -> (ConcludedParas, TimedoutParas) { - let mut timedout_paras = BTreeMap::new(); + just_freed_cores: impl IntoIterator, + ) -> (ConcludedParas, TimedoutParas) { + let mut timedout_paras: BTreeMap>> = + BTreeMap::new(); let mut concluded_paras = BTreeMap::new(); AvailabilityCores::::mutate(|cores| { @@ -215,26 +220,13 @@ impl Pallet { .for_each(|(freed_index, freed_reason)| { match &cores[freed_index.0 as usize] { CoreOccupied::Free => {}, - CoreOccupied::Parachain(_) => {}, // If we ever do slot sharing parachains, this case needs to be handled - CoreOccupied::Parathread(entry) => { + CoreOccupied::Paras(entry) => { match freed_reason { FreedReason::Concluded => { - concluded_paras.insert(freed_index, entry.claim.0); + concluded_paras.insert(freed_index, entry.para_id()); }, FreedReason::TimedOut => { - if entry.retries < - T::AssignmentProvider::get_max_retries(freed_index) - { - let entry = ParathreadEntry { - retries: entry.retries + 1, - claim: entry.claim.clone(), - }; - timedout_paras - .insert(freed_index, Assignment::ParathreadA(entry)); - } else { - // Consider max retried parathreads as concluded for the assignment provider - concluded_paras.insert(freed_index, entry.claim.0); - } + timedout_paras.insert(freed_index, 
entry.clone()); }, }; }, @@ -247,42 +239,92 @@ impl Pallet { (concluded_paras, timedout_paras) } - /// Note that the given cores have become occupied. Behavior undefined if any of the given cores were not scheduled - /// or the slice is not sorted ascending by core index. - /// - /// Complexity: O(n) in the number of scheduled cores, which is capped at the number of total cores. - /// This is efficient in the case that most scheduled cores are occupied. + /// Note that the given cores have become occupied. Update the claimqueue accordingly. pub(crate) fn occupied( now_occupied: BTreeMap, ) -> BTreeMap { let mut availability_cores = AvailabilityCores::::get(); - let pos_mapping = now_occupied - .into_iter() - .flat_map(|(core_idx, para_id)| match Self::remove_from_claimqueue(core_idx, para_id) { - Err(_) => None, // TODO: report back? - Ok((pos_in_claimqueue, assignment)) => { - // is this correct? - availability_cores[core_idx.0 as usize] = assignment.to_core_occupied(); + log::debug!(target: LOG_TARGET, "[occupied] now_occupied {:?}", now_occupied); - Some((core_idx, pos_in_claimqueue)) - }, + let pos_mapping: BTreeMap = now_occupied + .iter() + .flat_map(|(core_idx, para_id)| { + match Self::remove_from_claimqueue(*core_idx, *para_id) { + Err(e) => { + log::debug!( + target: LOG_TARGET, + "[occupied] error on remove_from_claimqueue {}", + e + ); + None + }, + Ok((pos_in_claimqueue, pe)) => { + // is this correct? + availability_cores[core_idx.0 as usize] = CoreOccupied::Paras(pe); + + Some((*core_idx, pos_in_claimqueue)) + }, + } }) .collect(); + // Drop expired claims after processing now_occupied. + Self::drop_expired_claims_from_claimqueue(); + AvailabilityCores::::set(availability_cores); pos_mapping } + /// Iterates through every element in all claim queues and tries to add new assignments from the + /// `AssignmentProvider`. A claim is considered expired if its `ttl` field is lower than the + /// current block height. 
+ fn drop_expired_claims_from_claimqueue() { + let now = >::block_number(); + let availability_cores = AvailabilityCores::::get(); + + ClaimQueue::::mutate(|cq| { + for (idx, _) in (0u32..).zip(availability_cores) { + let core_idx = CoreIndex(idx); + if let Some(core_claimqueue) = cq.get_mut(&core_idx) { + let mut dropped_claims: Vec> = vec![]; + core_claimqueue.retain(|maybe_entry| { + if let Some(entry) = maybe_entry { + if entry.ttl < now { + dropped_claims.push(Some(entry.para_id())); + return false + } + } + true + }); + + // For all claims dropped due to TTL, attempt to pop a new entry to + // the back of the claimqueue. + for drop in dropped_claims { + match T::AssignmentProvider::pop_assignment_for_core(core_idx, drop) { + Some(assignment) => { + let ttl = >::config().on_demand_ttl; + core_claimqueue.push_back(Some(ParasEntry::new( + assignment.clone(), + now + ttl, + ))); + }, + None => (), + } + } + } + } + }); + } + /// Get the para (chain or thread) ID assigned to a particular core or index, if any. Core indices /// out of bounds will return `None`, as will indices of unassigned cores. pub(crate) fn core_para(core_index: CoreIndex) -> Option { let cores = AvailabilityCores::::get(); match cores.get(core_index.0 as usize) { None | Some(CoreOccupied::Free) => None, - Some(CoreOccupied::Parachain(para_id)) => Some(*para_id), - Some(CoreOccupied::Parathread(entry)) => Some(entry.claim.0), + Some(CoreOccupied::Paras(entry)) => Some(entry.para_id()), } } @@ -327,50 +369,44 @@ impl Pallet { /// Returns an optional predicate that should be used for timing out occupied cores. /// /// If `None`, no timing-out should be done. The predicate accepts the index of the core, and the - /// block number since which it has been occupied, and the respective parachain and parathread - /// timeouts, i.e. only within `max(config.chain_availability_period, config.thread_availability_period)` - /// of the last rotation would this return `Some`, unless there are no rotations. 
+ /// block number since which it has been occupied, and the respective parachain timeout, i.e. only within + /// `config.paras_availability_period` of the last rotation would this return `Some`, + /// unless there are no rotations. /// - /// This really should not be a box, but is working around a compiler limitation filed here: - /// https://github.com/rust-lang/rust/issues/73226 - /// which prevents us from testing the code if using `impl Trait`. + /// The timeout used to depend, but does not depend any more on group rotations. First of all + /// it only matters if a para got another chance (a retry). If there is a retry and it happens + /// still within the same group rotation a censoring backing group would need to censor again + /// and lose out again on backing rewards. This is bad for the censoring backing group, it does + /// not matter for the parachain as long as it is retried often enough (so it eventually gets a + /// try on another backing group) - the effect is similar to having a prolonged timeout. It + /// should also be noted that for both malicious and offline backing groups it is actually more + /// realistic that the candidate will not be backed to begin with, instead of getting backed + /// and then not made available. 
pub(crate) fn availability_timeout_predicate( - ) -> Option) -> bool>> { + ) -> Option) -> bool> { let now = >::block_number(); let config = >::config(); - let session_start = >::get(); + let blocks_since_session_start = now.saturating_sub(session_start); let blocks_since_last_rotation = blocks_since_session_start % config.group_rotation_frequency.max(1u8.into()); - let absolute_cutoff = - sp_std::cmp::max(config.chain_availability_period, config.thread_availability_period); - - let availability_cores = AvailabilityCores::::get(); - - if blocks_since_last_rotation >= absolute_cutoff { + if blocks_since_last_rotation >= config.paras_availability_period { None } else { - let predicate = move |core_index: CoreIndex, pending_since| { + Some(|core_index: CoreIndex, pending_since| { + let availability_cores = AvailabilityCores::::get(); + let availability_period = + T::AssignmentProvider::get_availability_period(core_index); + let now = >::block_number(); match availability_cores.get(core_index.0 as usize) { None => true, // out-of-bounds, doesn't really matter what is returned. - Some(CoreOccupied::Free) => true, // core not occupied, still doesn't really matter. - Some(_) => { - // core not occupied, still doesn't really matter. - let availability_period = - T::AssignmentProvider::get_availability_period(core_index); - if blocks_since_last_rotation >= availability_period { - false // no pruning except recently after rotation. - } else { - let now = >::block_number(); - now.saturating_sub(pending_since) >= availability_period - } - }, + Some(CoreOccupied::Free) => true, // core free, still doesn't matter. 
+ Some(CoreOccupied::Paras(_)) => + now.saturating_sub(pending_since) >= availability_period, } - }; - - Some(Box::new(predicate)) + }) } } @@ -390,79 +426,95 @@ impl Pallet { pub(crate) fn next_up_on_available(core: CoreIndex) -> Option { ClaimQueue::::get().get(&core).and_then(|a| { a.iter() - .position(|e| e.is_some()) - .and_then(|pos| Self::assignment_to_scheduled_core(&a[pos])) + .find_map(|e| e.as_ref()) + .map(|pe| Self::paras_entry_to_scheduled_core(pe)) }) } - fn assignment_to_scheduled_core(ca: &Option) -> Option { - match ca { - None => None, - Some(ca) => match ca.kind.clone() { - Assignment::Parachain(_para_id) => - Some(ScheduledCore { para_id: ca.kind.para_id(), collator: None }), - Assignment::ParathreadA(entry) => - Some(ScheduledCore { para_id: ca.kind.para_id(), collator: entry.claim.1 }), - }, - } + fn paras_entry_to_scheduled_core(pe: &ParasEntry>) -> ScheduledCore { + ScheduledCore { para_id: pe.para_id(), collator: None } } /// Return the next thing that will be scheduled on this core assuming it is currently /// occupied and the candidate occupying it times out. pub(crate) fn next_up_on_time_out(core: CoreIndex) -> Option { - Self::next_up_on_available(core) + Self::next_up_on_available(core).or_else(|| { + // Or, if none, the claim currently occupying the core, + // as it would be put back on the queue after timing out if number of retries is not at the maximum. + let cores = AvailabilityCores::::get(); + cores.get(core.0 as usize).and_then(|c| match c { + CoreOccupied::Free => None, + CoreOccupied::Paras(pe) => { + if pe.availability_timeouts < T::AssignmentProvider::get_max_retries(core) { + Some(Self::paras_entry_to_scheduled_core(pe)) + } else { + None + } + }, + }) + }) + } + + /// Pushes occupied cores to the assignment provider. 
+ fn push_occupied_cores_to_assignment_provider() { + AvailabilityCores::::mutate(|cores| { + for (core_idx, core) in cores.iter_mut().enumerate() { + match core { + CoreOccupied::Free => continue, + CoreOccupied::Paras(entry) => { + let core_idx = CoreIndex::from(core_idx as u32); + Self::maybe_push_assignment(core_idx, entry.clone()); + }, + } + *core = CoreOccupied::Free; + } + }); } // on new session - fn reschedule_occupied_cores(cores: Vec) { - for (core_idx, core) in cores.into_iter().enumerate() { - match Assignment::from_core_occupied(core) { - None => continue, - Some(ass) => T::AssignmentProvider::push_assignment_for_core( - CoreIndex::from(core_idx as u32), - ass, - ), + fn push_claimqueue_items_to_assignment_provider() { + for (core_idx, core_claimqueue) in ClaimQueue::::take() { + // Push back in reverse order so that when we pop from the provider again, + // the entries in the claimqueue are in the same order as they are right now. + for para_entry in core_claimqueue.into_iter().flatten().rev() { + Self::maybe_push_assignment(core_idx, para_entry); } } } + /// Push assignments back to the provider on session change unless the paras + /// timed out on availability before. + fn maybe_push_assignment(core_idx: CoreIndex, pe: ParasEntry>) { + if pe.availability_timeouts == 0 { + T::AssignmentProvider::push_assignment_for_core(core_idx, pe.assignment); + } + } + // // ClaimQueue related functions // fn claimqueue_lookahead() -> u32 { - match >::config().scheduling_lookahead { - 0 => 1, - n => n, - } - } - - // on new session - fn clear_claimqueue() { - for (core_idx, cqv) in ClaimQueue::::take() { - for ca in cqv.into_iter().flatten() { - T::AssignmentProvider::push_assignment_for_core( - core_idx, - Assignment::from_core_assignment(ca), - ); - } - } + >::config().scheduling_lookahead } + /// Updates the claimqueue by moving it to the next paras and filling empty spots with new paras. 
pub(crate) fn update_claimqueue( - just_freed_cores: BTreeMap, + just_freed_cores: impl IntoIterator, now: BlockNumberFor, - ) -> Vec { + ) -> Vec>> { Self::move_claimqueue_forward(); - Self::fill_claimqueue(just_freed_cores, now) + Self::free_cores_and_fill_claimqueue(just_freed_cores, now) } + /// Moves all elements in the claimqueue forward. fn move_claimqueue_forward() { let mut cq = ClaimQueue::::get(); - for (_, vec) in cq.iter_mut() { - match vec.front() { + for (_, core_queue) in cq.iter_mut() { + // First pop the finished claims from the front. + match core_queue.front() { None => {}, Some(None) => { - vec.pop_front(); + core_queue.pop_front(); }, Some(_) => {}, } @@ -471,10 +523,11 @@ impl Pallet { ClaimQueue::::set(cq); } - fn fill_claimqueue( - just_freed_cores: BTreeMap, + /// Frees cores and fills the free claimqueue spots by popping from the `AssignmentProvider`. + fn free_cores_and_fill_claimqueue( + just_freed_cores: impl IntoIterator, now: BlockNumberFor, - ) -> Vec { + ) -> Vec>> { let (mut concluded_paras, mut timedout_paras) = Self::free_cores(just_freed_cores); // This can only happen on new sessions at which we move all assignments back to the provider. 
@@ -485,65 +538,82 @@ impl Pallet { let n_lookahead = Self::claimqueue_lookahead(); let n_session_cores = T::AssignmentProvider::session_core_count(); let cq = ClaimQueue::::get(); + let ttl = >::config().on_demand_ttl; for core_idx in 0..n_session_cores { - let core_idx = CoreIndex(core_idx); - let group_idx = Self::group_assigned_to_core(core_idx, now).expect( - "core is not out of bounds and we are guaranteed \ - to be after the most recent session start; qed", - ); + let core_idx = CoreIndex::from(core_idx); // add previously timedout paras back into the queue - if let Some(assignment) = timedout_paras.remove(&core_idx) { - let ca = assignment.to_core_assignment(core_idx, group_idx); - Self::add_to_claimqueue(ca); + if let Some(mut entry) = timedout_paras.remove(&core_idx) { + if entry.availability_timeouts < + T::AssignmentProvider::get_max_retries(core_idx) + { + // Increment the timeout counter. + entry.availability_timeouts += 1; + // Reset the ttl so that a timed-out assignment gets a fresh availability period. + entry.ttl = now + ttl; + Self::add_to_claimqueue(core_idx, entry); + // The claim has been added back into the claimqueue. + // Do not pop another assignment for the core. 
+ continue + } else { + // Consider timed out assignments for on demand parachains as concluded for the assignment provider + let ret = concluded_paras.insert(core_idx, entry.para_id()); + debug_assert!(ret.is_none()); + } } - let n_lookahead_used = cq.get(&core_idx).map_or(0, |v| v.len() as u32); + // We consider occupied cores to be part of the claimqueue + let n_lookahead_used = cq.get(&core_idx).map_or(0, |v| v.len() as u32) + + if Self::is_core_occupied(core_idx) { 1 } else { 0 }; for _ in n_lookahead_used..n_lookahead { let concluded_para = concluded_paras.remove(&core_idx); - if let Some(ass) = + if let Some(assignment) = T::AssignmentProvider::pop_assignment_for_core(core_idx, concluded_para) { - let ca = ass.to_core_assignment(core_idx, group_idx); - Self::add_to_claimqueue(ca); + Self::add_to_claimqueue(core_idx, ParasEntry::new(assignment, now + ttl)); } } } - assert!(timedout_paras.is_empty()); - assert!(concluded_paras.is_empty()); + debug_assert!(timedout_paras.is_empty()); + debug_assert!(concluded_paras.is_empty()); - Self::scheduled_claimqueue() + Self::scheduled_claimqueue(now) } } - fn add_to_claimqueue(ca: CoreAssignment) { - ClaimQueue::::mutate(|la| match la.get_mut(&ca.core) { - None => { - la.insert(ca.core, vec![Some(ca)].into_iter().collect()); - }, - Some(la_vec) => la_vec.push_back(Some(ca)), + fn is_core_occupied(core_idx: CoreIndex) -> bool { + match AvailabilityCores::::get().get(core_idx.0 as usize) { + None | Some(CoreOccupied::Free) => false, + Some(CoreOccupied::Paras(_)) => true, + } + } + + fn add_to_claimqueue(core_idx: CoreIndex, pe: ParasEntry>) { + ClaimQueue::::mutate(|la| { + let la_deque = la.entry(core_idx).or_insert_with(|| VecDeque::new()); + la_deque.push_back(Some(pe)); }); } - /// Returns `CoreAssignment` with `para_id` at `core_idx` if found. + /// Returns `ParasEntry` with `para_id` at `core_idx` if found. 
fn remove_from_claimqueue( core_idx: CoreIndex, para_id: ParaId, - ) -> Result<(PositionInClaimqueue, CoreAssignment), &'static str> { + ) -> Result<(PositionInClaimqueue, ParasEntry>), &'static str> { let mut cq = ClaimQueue::::get(); - let la_vec = cq.get_mut(&core_idx).ok_or_else(|| "core_idx not found in lookahead")?; + let la_vec = cq.get_mut(&core_idx).ok_or("core_idx not found in lookahead")?; let pos = la_vec .iter() - .position(|a| a.as_ref().map_or(false, |v| v.kind.para_id() == para_id)) - .ok_or_else(|| "para id not found at core_idx lookahead")?; + .position(|a| a.as_ref().map_or(false, |pe| pe.para_id() == para_id)) + .ok_or("para id not found at core_idx lookahead")?; - let ca = la_vec + let pe = la_vec .remove(pos) - .expect("position() above tells us this element exist.") - .expect("position() above tells us this element exist."); + .ok_or("remove returned None")? + .ok_or("Element in Claimqueue was None.")?; // Since the core is now occupied, the next entry in the claimqueue in order to achieve 12 second block times needs to be None if la_vec.front() != Some(&None) { @@ -551,13 +621,33 @@ impl Pallet { } ClaimQueue::::set(cq); - Ok((pos as u32, ca)) + Ok((pos as u32, pe)) } - // Temporary to imitate the old schedule() call. Will disappear when we make the scheduler AB ready - pub(crate) fn scheduled_claimqueue() -> Vec { + // TODO: Temporary to imitate the old schedule() call. 
Will be adjusted when we make the scheduler AB ready + pub(crate) fn scheduled_claimqueue( + now: BlockNumberFor, + ) -> Vec>> { let claimqueue = ClaimQueue::::get(); - claimqueue.into_iter().flat_map(|(_, v)| v.front().cloned()).flatten().collect() + + claimqueue + .into_iter() + .flat_map(|(core_idx, v)| { + v.front() + .cloned() + .flatten() + .and_then(|pe| Self::paras_entry_to_core_assignment(now, core_idx, pe)) + }) + .collect() + } + + fn paras_entry_to_core_assignment( + now: BlockNumberFor, + core_idx: CoreIndex, + pe: ParasEntry>, + ) -> Option>> { + let group_idx = Self::group_assigned_to_core(core_idx, now)?; + Some(CoreAssignment { core: core_idx, group_idx, paras_entry: pe }) } #[cfg(any(feature = "try-runtime", test))] @@ -569,30 +659,4 @@ impl Pallet { pub(crate) fn claimqueue_is_empty() -> bool { Self::claimqueue_len() == 0 } - - #[cfg(test)] - pub(crate) fn claimqueue_contains_only_none() -> bool { - let mut cq = ClaimQueue::::get(); - for (_, v) in cq.iter_mut() { - v.retain(|e| e.is_some()); - } - - cq.iter().map(|(_, v)| v.len()).sum::() == 0 - } - - #[cfg(test)] - pub(crate) fn claimqueue_contains_para_ids(pids: Vec) -> bool { - use sp_std::collections::btree_set::BTreeSet; - - let set: BTreeSet = ClaimQueue::::get() - .into_iter() - .flat_map(|(_, assignments)| { - assignments - .into_iter() - .filter_map(|assignment| assignment.and_then(|ca| Some(ca.kind.para_id()))) - }) - .collect(); - - pids.into_iter().all(|pid| set.contains(&pid)) - } } diff --git a/runtime/parachains/src/scheduler/migration.rs b/runtime/parachains/src/scheduler/migration.rs index b469e7508dbe..4b5f8c51866c 100644 --- a/runtime/parachains/src/scheduler/migration.rs +++ b/runtime/parachains/src/scheduler/migration.rs @@ -1,14 +1,31 @@ -use super::*; +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
-const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. -pub mod v1 { +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A module that is responsible for migration of storage. + +use super::*; +use frame_support::{ + pallet_prelude::ValueQuery, storage_alias, traits::OnRuntimeUpgrade, weights::Weight, +}; +use primitives::vstaging::Assignment; + +mod v0 { use super::*; - use crate::scheduler_common::CoreAssignment; - use frame_support::{ - pallet_prelude::ValueQuery, storage_alias, traits::OnRuntimeUpgrade, weights::Weight, - }; + use primitives::CollatorId; #[storage_alias] pub(super) type Scheduled = StorageValue, Vec, ValueQuery>; @@ -24,6 +41,13 @@ pub mod v1 { next_core_offset: u32, } + // Only here to facilitate the migration. + impl ParathreadClaimQueue { + pub fn len(self) -> usize { + self.queue.len() + } + } + #[storage_alias] pub(super) type ParathreadQueue = StorageValue, ParathreadClaimQueue, ValueQuery>; @@ -32,6 +56,37 @@ pub mod v1 { pub(super) type ParathreadClaimIndex = StorageValue, Vec, ValueQuery>; + /// The assignment type. + #[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)] + #[cfg_attr(feature = "std", derive(PartialEq))] + pub enum AssignmentKind { + /// A parachain. + Parachain, + /// A parathread. + Parathread(CollatorId, u32), + } + + /// How a free core is scheduled to be assigned. 
+ #[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)] + #[cfg_attr(feature = "std", derive(PartialEq))] + pub struct CoreAssignment { + /// The core that is assigned. + pub core: CoreIndex, + /// The unique ID of the para that is assigned to the core. + pub para_id: ParaId, + /// The kind of the assignment. + pub kind: AssignmentKind, + /// The index of the validator group assigned to the core. + pub group_idx: GroupIndex, + } +} + +pub mod v1 { + use super::*; + use frame_support::traits::StorageVersion; + + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + pub struct MigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV1 { fn on_runtime_upgrade() -> Weight { @@ -60,14 +115,14 @@ pub mod v1 { log::trace!( target: crate::scheduler::LOG_TARGET, "Scheduled before migration: {}", - Scheduled::::get().len() + v0::Scheduled::::get().len() ); ensure!( StorageVersion::get::>() == 0, "Storage version should be less than `1` before the migration", ); - let bytes = u32::to_be_bytes(Scheduled::::get().len() as u32); + let bytes = u32::to_be_bytes(v0::Scheduled::::get().len() as u32); Ok(bytes.to_vec()) } @@ -80,7 +135,7 @@ pub mod v1 { "Storage version should be `1` after the migration" ); ensure!( - Scheduled::::get().len() == 0, + v0::Scheduled::::get().len() == 0, "Scheduled should be empty after the migration" ); @@ -93,28 +148,31 @@ pub mod v1 { Ok(()) } } +} - pub fn migrate_to_v1() -> Weight { - let mut weight: Weight = Weight::zero(); +pub fn migrate_to_v1() -> Weight { + let mut weight: Weight = Weight::zero(); - let pq = ParathreadQueue::::take(); - let pq_len = pq.queue.len() as u64; + let pq = v0::ParathreadQueue::::take(); + let pq_len = pq.len() as u64; - let pci = ParathreadClaimIndex::::take(); - let pci_len = pci.len() as u64; + let pci = v0::ParathreadClaimIndex::::take(); + let pci_len = pci.len() as u64; - let scheduled = Scheduled::::take(); - let sched_len = scheduled.len() as u64; - for core_assignment in 
scheduled { - Pallet::::add_to_claimqueue(core_assignment); - } + let now = >::block_number(); + let scheduled = v0::Scheduled::::take(); + let sched_len = scheduled.len() as u64; + for core_assignment in scheduled { + let core_idx = core_assignment.core; + let assignment = Assignment::new(core_assignment.para_id); + let pe = ParasEntry::new(assignment, now); + Pallet::::add_to_claimqueue(core_idx, pe); + } - // 2x as once for Scheduled and once for Claimqueue - weight = - weight.saturating_add(T::DbWeight::get().reads_writes(2 * sched_len, 2 * sched_len)); - weight = weight.saturating_add(T::DbWeight::get().reads_writes(pq_len, pq_len)); - weight = weight.saturating_add(T::DbWeight::get().reads_writes(pci_len, pci_len)); + // 2x as once for Scheduled and once for Claimqueue + weight = weight.saturating_add(T::DbWeight::get().reads_writes(2 * sched_len, 2 * sched_len)); + weight = weight.saturating_add(T::DbWeight::get().reads_writes(pq_len, pq_len)); + weight = weight.saturating_add(T::DbWeight::get().reads_writes(pci_len, pci_len)); - weight - } + weight } diff --git a/runtime/parachains/src/scheduler/tests.rs b/runtime/parachains/src/scheduler/tests.rs index bfd7a2426e3e..3f6aea4b08c1 100644 --- a/runtime/parachains/src/scheduler/tests.rs +++ b/runtime/parachains/src/scheduler/tests.rs @@ -18,21 +18,18 @@ use super::*; use frame_support::assert_ok; use keyring::Sr25519Keyring; -use primitives::{ - BlockNumber, CollatorId, ParathreadClaim, SessionIndex, ValidationCode, ValidatorId, -}; +use primitives::{v5::Assignment, BlockNumber, SessionIndex, ValidationCode, ValidatorId}; +use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; use crate::{ + assigner_on_demand::QueuePushDirection, configuration::HostConfiguration, initializer::SessionChangeNotification, mock::{ - new_test_ext, MockGenesisConfig, Paras, ParasShared, RuntimeOrigin, Scheduler, System, Test, + new_test_ext, MockGenesisConfig, OnDemandAssigner, Paras, ParasShared, RuntimeOrigin, + 
Scheduler, System, Test, }, paras::{ParaGenesisArgs, ParaKind}, - scheduler_common::Assignment, - //scheduler_parathreads::{ - // ParathreadClaimIndex, ParathreadClaimQueue, ParathreadQueue, QueuedParathread, - //}, }; fn schedule_blank_para(id: ParaId, parakind: ParaKind) { @@ -65,6 +62,8 @@ fn run_to_block( if notification_with_session_index.session_index == SessionIndex::default() { notification_with_session_index.session_index = ParasShared::scheduled_session(); } + Scheduler::pre_new_session(); + Paras::initializer_on_new_session(¬ification_with_session_index); Scheduler::initializer_on_new_session(¬ification_with_session_index); } @@ -92,6 +91,8 @@ fn run_to_end_of_block( Paras::initializer_finalize(to); if let Some(notification) = new_session(to + 1) { + Scheduler::pre_new_session(); + Paras::initializer_on_new_session(¬ification); Scheduler::initializer_on_new_session(¬ification); } @@ -101,13 +102,11 @@ fn run_to_end_of_block( fn default_config() -> HostConfiguration { HostConfiguration { - parathread_cores: 3, + on_demand_cores: 3, group_rotation_frequency: 10, - chain_availability_period: 3, - thread_availability_period: 5, - // most old tests implicitly assume this + paras_availability_period: 3, scheduling_lookahead: 2, - parathread_retries: 1, + on_demand_retries: 1, // This field does not affect anything that scheduler does. However, `HostConfiguration` // is still a subject to consistency test. It requires that `minimum_validation_upgrade_delay` // is greater than `chain_availability_period` and `thread_availability_period`. @@ -116,21 +115,163 @@ fn default_config() -> HostConfiguration { } } -// Pretty useless here. Should be on parathread assigner... 
if at all -#[test] -fn add_parathread_claim_works() { - let genesis_config = MockGenesisConfig { +fn genesis_config(config: &HostConfiguration) -> MockGenesisConfig { + MockGenesisConfig { configuration: crate::configuration::GenesisConfig { - config: default_config(), + config: config.clone(), ..Default::default() }, ..Default::default() - }; + } +} + +#[cfg(test)] +pub(crate) fn claimqueue_contains_only_none() -> bool { + let mut cq = Scheduler::claimqueue(); + for (_, v) in cq.iter_mut() { + v.retain(|e| e.is_some()); + } + + cq.iter().map(|(_, v)| v.len()).sum::() == 0 +} + +#[cfg(test)] +pub(crate) fn claimqueue_contains_para_ids(pids: Vec) -> bool { + let set: BTreeSet = ClaimQueue::::get() + .into_iter() + .flat_map(|(_, assignments)| { + assignments + .into_iter() + .filter_map(|assignment| assignment.and_then(|pe| Some(pe.para_id()))) + }) + .collect(); + + pids.into_iter().all(|pid| set.contains(&pid)) +} + +#[cfg(test)] +pub(crate) fn availability_cores_contains_para_ids(pids: Vec) -> bool { + let set: BTreeSet = AvailabilityCores::::get() + .into_iter() + .filter_map(|core| match core { + CoreOccupied::Free => None, + CoreOccupied::Paras(entry) => Some(entry.para_id()), + }) + .collect(); + + pids.into_iter().all(|pid| set.contains(&pid)) +} + +#[test] +fn claimqueue_ttl_drop_fn_works() { + let mut config = default_config(); + config.scheduling_lookahead = 3; + let genesis_config = genesis_config(&config); + + let para_id = ParaId::from(100); + let core_idx = CoreIndex::from(0); + let mut now = 10; + + new_test_ext(genesis_config).execute_with(|| { + assert!(default_config().on_demand_ttl == 5); + // Register and run to a blockheight where the para is in a valid state. + schedule_blank_para(para_id, ParaKind::Parathread); + run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); + + // Add a claim on core 0 with a ttl in the past. 
+ let paras_entry = ParasEntry::new(Assignment::new(para_id), now - 5); + Scheduler::add_to_claimqueue(core_idx, paras_entry.clone()); + + // Claim is in queue prior to call. + assert!(claimqueue_contains_para_ids::(vec![para_id])); + + // Claim is dropped post call. + Scheduler::drop_expired_claims_from_claimqueue(); + assert!(!claimqueue_contains_para_ids::(vec![para_id])); + + // Add a claim on core 0 with a ttl in the future (15). + let paras_entry = ParasEntry::new(Assignment::new(para_id), now + 5); + Scheduler::add_to_claimqueue(core_idx, paras_entry.clone()); + + // Claim is in queue post call. + Scheduler::drop_expired_claims_from_claimqueue(); + assert!(claimqueue_contains_para_ids::(vec![para_id])); + + now = now + 6; + run_to_block(now, |_| None); + + // Claim is dropped + Scheduler::drop_expired_claims_from_claimqueue(); + assert!(!claimqueue_contains_para_ids::(vec![para_id])); + + // Add a claim on core 0 with a ttl == now (16) + let paras_entry = ParasEntry::new(Assignment::new(para_id), now); + Scheduler::add_to_claimqueue(core_idx, paras_entry.clone()); + + // Claim is in queue post call. + Scheduler::drop_expired_claims_from_claimqueue(); + assert!(claimqueue_contains_para_ids::(vec![para_id])); + + now = now + 1; + run_to_block(now, |_| None); + + // Drop expired claim. + Scheduler::drop_expired_claims_from_claimqueue(); + + // Add a claim on core 0 with a ttl == now (17) + let paras_entry_non_expired = ParasEntry::new(Assignment::new(para_id), now); + let paras_entry_expired = ParasEntry::new(Assignment::new(para_id), now - 2); + // ttls = [17, 15, 17] + Scheduler::add_to_claimqueue(core_idx, paras_entry_non_expired.clone()); + Scheduler::add_to_claimqueue(core_idx, paras_entry_expired.clone()); + Scheduler::add_to_claimqueue(core_idx, paras_entry_non_expired.clone()); + let cq = Scheduler::claimqueue(); + assert!(cq.get(&core_idx).unwrap().len() == 3); + + // Add claims to on demand assignment provider. 
+ let assignment = Assignment::new(para_id); + + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment.clone(), + QueuePushDirection::Back + )); + + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment, + QueuePushDirection::Back + )); + + // Drop expired claim. + Scheduler::drop_expired_claims_from_claimqueue(); + + let cq = Scheduler::claimqueue(); + let cqc = cq.get(&core_idx).unwrap(); + // Same number of claims + assert!(cqc.len() == 3); + + // The first 2 claims in the queue should have a ttl of 17, + // being the ones set up prior in this test as claims 1 and 3. + // The third claim is popped from the assignment provider and + // has a new ttl set by the scheduler of now + config.on_demand_ttl. + // ttls = [17, 17, 22] + assert!(cqc.iter().enumerate().all(|(index, entry)| { + match index { + 0 | 1 => return entry.clone().unwrap().ttl == 17, + 2 => return entry.clone().unwrap().ttl == 22, + _ => return false, + } + })) + }); +} + +// Pretty useless here. Should be on parathread assigner... 
if at all +#[test] +fn add_parathread_claim_works() { + let genesis_config = genesis_config(&default_config()); let thread_id = ParaId::from(10); - let collator = CollatorId::from(Sr25519Keyring::Alice.public()); let core_index = CoreIndex::from(0); - let group_index = GroupIndex::from(0); + let entry_ttl = 10_000; new_test_ext(genesis_config).execute_with(|| { schedule_blank_para(thread_id, ParaKind::Parathread); @@ -141,218 +282,70 @@ fn add_parathread_claim_works() { assert!(Paras::is_parathread(thread_id)); - let assignment = Assignment::ParathreadA(ParathreadEntry { - claim: ParathreadClaim(thread_id, Some(collator.clone())), - retries: 0, - }); - let ca = assignment.to_core_assignment(core_index, group_index); - Scheduler::add_to_claimqueue(ca.clone()); + let pe = ParasEntry::new(Assignment::new(thread_id), entry_ttl); + Scheduler::add_to_claimqueue(core_index, pe.clone()); let cq = Scheduler::claimqueue(); assert_eq!(Scheduler::claimqueue_len(), 1); - assert_eq!(*(cq.get(&core_index).unwrap().front().unwrap()), Some(ca)); + assert_eq!(*(cq.get(&core_index).unwrap().front().unwrap()), Some(pe)); }) } -/// MOVE TO polkadot provider -//#[test] -//fn cannot_add_claim_when_no_parathread_cores() { -// let config = { -// let mut config = default_config(); -// config.parathread_cores = 0; -// config -// }; -// let genesis_config = MockGenesisConfig { -// configuration: crate::configuration::GenesisConfig { config, ..Default::default() }, -// ..Default::default() -// }; -// -// let thread_id = ParaId::from(10); -// let collator = CollatorId::from(Sr25519Keyring::Alice.public()); -// -// new_test_ext(genesis_config).execute_with(|| { -// schedule_blank_para(thread_id, ParaKind::Parathread); -// -// assert!(!Paras::is_parathread(thread_id)); -// -// run_to_block(10, |n| if n == 10 { Some(Default::default()) } else { None }); -// -// assert!(Paras::is_parathread(thread_id)); -// -// SchedulerParathreads::add_parathread_claim(ParathreadClaim(thread_id, 
collator.clone())); -// assert_eq!(ParathreadQueue::::get(), Default::default()); -// }); -//} - -//#[test] -//fn session_change_prunes_cores_beyond_retries_and_those_from_non_live_parathreads() { -// let genesis_config = MockGenesisConfig { -// configuration: crate::configuration::GenesisConfig { -// config: default_config(), -// ..Default::default() -// }, -// ..Default::default() -// }; -// let max_parathread_retries = default_config().parathread_retries; -// -// let thread_a = ParaId::from(1_u32); -// let thread_b = ParaId::from(2_u32); -// let thread_c = ParaId::from(3_u32); -// let thread_d = ParaId::from(4_u32); -// -// let collator = CollatorId::from(Sr25519Keyring::Alice.public()); -// -// new_test_ext(genesis_config).execute_with(|| { -// assert_eq!(Configuration::config(), default_config()); -// -// // threads a, b, and c will be live in next session, but not d. -// { -// schedule_blank_para(thread_a, ParaKind::Parathread); -// schedule_blank_para(thread_b, ParaKind::Parathread); -// schedule_blank_para(thread_c, ParaKind::Parathread); -// } -// -// // set up a queue as if `n_cores` was 4 and with some with many retries. -// ParathreadQueue::::put({ -// let mut queue = ParathreadClaimQueue::default(); -// -// // Will be pruned: too many retries. -// queue.enqueue_entry( -// ParathreadEntry { -// claim: ParathreadClaim(thread_a, collator.clone()), -// retries: max_parathread_retries + 1, -// }, -// 4, -// ); -// -// // Will not be pruned. -// queue.enqueue_entry( -// ParathreadEntry { -// claim: ParathreadClaim(thread_b, collator.clone()), -// retries: max_parathread_retries, -// }, -// 4, -// ); -// -// // Will not be pruned. -// queue.enqueue_entry( -// ParathreadEntry { claim: ParathreadClaim(thread_c, collator.clone()), retries: 0 }, -// 4, -// ); -// -// // Will be pruned: not a live parathread. 
-// queue.enqueue_entry( -// ParathreadEntry { claim: ParathreadClaim(thread_d, collator.clone()), retries: 0 }, -// 4, -// ); -// -// queue -// }); -// -// ParathreadClaimIndex::::put(vec![thread_a, thread_b, thread_c, thread_d]); -// -// run_to_block(10, |b| match b { -// 10 => Some(SessionChangeNotification { -// new_config: Configuration::config(), -// ..Default::default() -// }), -// _ => None, -// }); -// assert_eq!(Configuration::config(), default_config()); -// -// let queue = ParathreadQueue::::get(); -// assert_eq!( -// queue.queue, -// vec![ -// QueuedParathread { -// claim: ParathreadEntry { -// claim: ParathreadClaim(thread_b, collator.clone()), -// retries: max_parathread_retries, -// }, -// core_offset: 0, -// }, -// QueuedParathread { -// claim: ParathreadEntry { -// claim: ParathreadClaim(thread_c, collator.clone()), -// retries: 0, -// }, -// core_offset: 1, -// }, -// ] -// ); -// assert_eq!(queue.next_core_offset, 2); -// -// assert_eq!(ParathreadClaimIndex::::get(), vec![thread_b, thread_c]); -// }) -//} -// -//#[test] -//fn session_change_shuffles_validators() { -// let genesis_config = MockGenesisConfig { -// configuration: crate::configuration::GenesisConfig { -// config: default_config(), -// ..Default::default() -// }, -// ..Default::default() -// }; -// -// assert_eq!(default_config().parathread_cores, 3); -// new_test_ext(genesis_config).execute_with(|| { -// let chain_a = ParaId::from(1_u32); -// let chain_b = ParaId::from(2_u32); -// -// // ensure that we have 5 groups by registering 2 parachains. 
-// schedule_blank_para(chain_a, ParaKind::Parachain); -// schedule_blank_para(chain_b, ParaKind::Parachain); -// -// run_to_block(1, |number| match number { -// 1 => Some(SessionChangeNotification { -// new_config: default_config(), -// validators: vec![ -// ValidatorId::from(Sr25519Keyring::Alice.public()), -// ValidatorId::from(Sr25519Keyring::Bob.public()), -// ValidatorId::from(Sr25519Keyring::Charlie.public()), -// ValidatorId::from(Sr25519Keyring::Dave.public()), -// ValidatorId::from(Sr25519Keyring::Eve.public()), -// ValidatorId::from(Sr25519Keyring::Ferdie.public()), -// ValidatorId::from(Sr25519Keyring::One.public()), -// ], -// random_seed: [99; 32], -// ..Default::default() -// }), -// _ => None, -// }); -// -// let groups = ValidatorGroups::::get(); -// assert_eq!(groups.len(), 5); -// -// // first two groups have the overflow. -// for i in 0..2 { -// assert_eq!(groups[i].len(), 2); -// } -// -// for i in 2..5 { -// assert_eq!(groups[i].len(), 1); -// } -// }); -//} +#[test] +fn session_change_shuffles_validators() { + let genesis_config = genesis_config(&default_config()); + + assert_eq!(default_config().on_demand_cores, 3); + new_test_ext(genesis_config).execute_with(|| { + let chain_a = ParaId::from(1_u32); + let chain_b = ParaId::from(2_u32); + + // ensure that we have 5 groups by registering 2 parachains. 
+ schedule_blank_para(chain_a, ParaKind::Parachain); + schedule_blank_para(chain_b, ParaKind::Parachain); + + run_to_block(1, |number| match number { + 1 => Some(SessionChangeNotification { + new_config: default_config(), + validators: vec![ + ValidatorId::from(Sr25519Keyring::Alice.public()), + ValidatorId::from(Sr25519Keyring::Bob.public()), + ValidatorId::from(Sr25519Keyring::Charlie.public()), + ValidatorId::from(Sr25519Keyring::Dave.public()), + ValidatorId::from(Sr25519Keyring::Eve.public()), + ValidatorId::from(Sr25519Keyring::Ferdie.public()), + ValidatorId::from(Sr25519Keyring::One.public()), + ], + random_seed: [99; 32], + ..Default::default() + }), + _ => None, + }); + + let groups = ValidatorGroups::::get(); + assert_eq!(groups.len(), 5); + + // first two groups have the overflow. + for i in 0..2 { + assert_eq!(groups[i].len(), 2); + } + + for i in 2..5 { + assert_eq!(groups[i].len(), 1); + } + }); +} #[test] fn session_change_takes_only_max_per_core() { let config = { let mut config = default_config(); - config.parathread_cores = 0; + config.on_demand_cores = 0; config.max_validators_per_core = Some(1); config }; - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: config.clone(), - ..Default::default() - }, - ..Default::default() - }; + let genesis_config = genesis_config(&config); new_test_ext(genesis_config).execute_with(|| { let chain_a = ParaId::from(1_u32); @@ -394,13 +387,7 @@ fn session_change_takes_only_max_per_core() { #[test] fn fill_claimqueue_fills() { - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: default_config(), - ..Default::default() - }, - ..Default::default() - }; + let genesis_config = genesis_config(&default_config()); let lookahead = genesis_config.configuration.config.scheduling_lookahead as usize; let chain_a = ParaId::from(1_u32); @@ -410,10 +397,12 @@ fn fill_claimqueue_fills() { let thread_b = 
ParaId::from(4_u32); let thread_c = ParaId::from(5_u32); - //let collator = CollatorId::from(Sr25519Keyring::Alice.public()); + let assignment_a = Assignment { para_id: thread_a }; + let assignment_b = Assignment { para_id: thread_b }; + let assignment_c = Assignment { para_id: thread_c }; new_test_ext(genesis_config).execute_with(|| { - assert_eq!(default_config().parathread_cores, 3); + assert_eq!(default_config().on_demand_cores, 3); // register 2 parachains schedule_blank_para(chain_a, ParaKind::Parachain); @@ -442,273 +431,355 @@ fn fill_claimqueue_fills() { { assert_eq!(Scheduler::claimqueue_len(), 2 * lookahead); - //let cq = Scheduler::claimqueue(); + let scheduled = Scheduler::scheduled_claimqueue(1); // Cannot assert on indices anymore as they depend on the assignment providers - assert!(Scheduler::claimqueue_contains_para_ids(vec![chain_a, chain_b])); - // TODO: checks for group indices? - //assert_eq!( - // scheduled[0], - // CoreAssignment { - // core: CoreIndex(0), - // kind: Assignment::Parachain(chain_a), - // group_idx: GroupIndex(0), - // } - //); - - //assert_eq!( - // scheduled[1], - // CoreAssignment { - // core: CoreIndex(1), - // kind: Assignment::Parachain(chain_b), - // group_idx: GroupIndex(1), - // } - //); + assert!(claimqueue_contains_para_ids::(vec![chain_a, chain_b])); + + assert_eq!( + scheduled[0], + CoreAssignment { + core: CoreIndex(0), + paras_entry: ParasEntry { + assignment: Assignment { para_id: chain_a }, + availability_timeouts: 0, + ttl: 6 + }, + group_idx: GroupIndex(0), + } + ); + + assert_eq!( + scheduled[1], + CoreAssignment { + core: CoreIndex(1), + paras_entry: ParasEntry { + assignment: Assignment { para_id: chain_b }, + availability_timeouts: 0, + ttl: 6 + }, + group_idx: GroupIndex(1), + } + ); } - // add a couple of parathread claims. 
- //SchedulerParathreads::add_parathread_claim(ParathreadClaim(thread_a, collator.clone())); - //SchedulerParathreads::add_parathread_claim(ParathreadClaim(thread_c, collator.clone())); - - //run_to_block(2, |_| None); - - //{ - - // assert_eq!(Scheduler::claimqueue_len(), 4); - // let cq = Scheduler::claimqueue(); - - // assert_eq!( - // scheduled[0], - // CoreAssignment { - // core: CoreIndex(0), - // kind: Assignment::Parachain(chain_a), - // group_idx: GroupIndex(0), - // } - // ); - - // assert_eq!( - // scheduled[1], - // CoreAssignment { - // core: CoreIndex(1), - // kind: Assignment::Parachain(chain_b), - // group_idx: GroupIndex(1), - // } - // ); - - // assert_eq!( - // scheduled[2], - // CoreAssignment { - // core: CoreIndex(2), - // kind: Assignment::Parathread(ParathreadEntry {claim: ParathreadClaim(thread_a, Some(collator.clone())), retries: 0}), - // group_idx: GroupIndex(2), - // } - // ); - - // assert_eq!( - // scheduled[3], - // CoreAssignment { - // core: CoreIndex(3), - // kind: Assignment::Parathread(ParathreadEntry {claim: ParathreadClaim(thread_c, Some(collator.clone())), retries: 0}), - // group_idx: GroupIndex(3), - // } - // ); - //} + // add a couple of parathread assignments. + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_a, + QueuePushDirection::Back + )); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_b, + QueuePushDirection::Back + )); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_c, + QueuePushDirection::Back + )); + + run_to_block(2, |_| None); + // cores 0 and 1 should be occupied. mark them as such. 
+ Scheduler::occupied( + vec![(CoreIndex(0), chain_a), (CoreIndex(1), chain_b)].into_iter().collect(), + ); + + run_to_block(3, |_| None); + + { + assert_eq!(Scheduler::claimqueue_len(), 5); + let scheduled = Scheduler::scheduled_claimqueue(3); + + assert_eq!( + scheduled[0], + CoreAssignment { + core: CoreIndex(0), + paras_entry: ParasEntry { + assignment: Assignment { para_id: chain_a }, + availability_timeouts: 0, + ttl: 6 + }, + group_idx: GroupIndex(0), + } + ); + assert_eq!( + scheduled[1], + CoreAssignment { + core: CoreIndex(1), + paras_entry: ParasEntry { + assignment: Assignment { para_id: chain_b }, + availability_timeouts: 0, + ttl: 6 + }, + group_idx: GroupIndex(1), + } + ); + + // Was added a block later, note the TTL. + assert_eq!( + scheduled[2], + CoreAssignment { + core: CoreIndex(2), + paras_entry: ParasEntry { + assignment: Assignment { para_id: thread_a }, + availability_timeouts: 0, + ttl: 7 + }, + group_idx: GroupIndex(2), + } + ); + // Sits on the same core as `thread_a` + assert_eq!( + Scheduler::claimqueue().get(&CoreIndex(2)).unwrap()[1], + Some(ParasEntry { + assignment: Assignment { para_id: thread_b }, + availability_timeouts: 0, + ttl: 7 + }) + ); + assert_eq!( + scheduled[3], + CoreAssignment { + core: CoreIndex(3), + paras_entry: ParasEntry { + assignment: Assignment { para_id: thread_c }, + availability_timeouts: 0, + ttl: 7 + }, + group_idx: GroupIndex(3), + } + ); + } }); } -//#[test] -//fn schedule_schedules_including_just_freed() { -// let genesis_config = MockGenesisConfig { -// configuration: crate::configuration::GenesisConfig { -// config: default_config(), -// ..Default::default() -// }, -// ..Default::default() -// }; -// -// let chain_a = ParaId::from(1_u32); -// let chain_b = ParaId::from(2_u32); -// -// let thread_a = ParaId::from(3_u32); -// let thread_b = ParaId::from(4_u32); -// let thread_c = ParaId::from(5_u32); -// let thread_d = ParaId::from(6_u32); -// let thread_e = ParaId::from(7_u32); -// -// let collator = 
CollatorId::from(Sr25519Keyring::Alice.public()); -// -// new_test_ext(genesis_config).execute_with(|| { -// assert_eq!(default_config().parathread_cores, 3); -// -// // register 2 parachains -// schedule_blank_para(chain_a, ParaKind::Parachain); -// schedule_blank_para(chain_b, ParaKind::Parachain); -// -// // and 5 parathreads -// schedule_blank_para(thread_a, ParaKind::Parathread); -// schedule_blank_para(thread_b, ParaKind::Parathread); -// schedule_blank_para(thread_c, ParaKind::Parathread); -// schedule_blank_para(thread_d, ParaKind::Parathread); -// schedule_blank_para(thread_e, ParaKind::Parathread); -// -// // start a new session to activate, 5 validators for 5 cores. -// run_to_block(1, |number| match number { -// 1 => Some(SessionChangeNotification { -// new_config: default_config(), -// validators: vec![ -// ValidatorId::from(Sr25519Keyring::Alice.public()), -// ValidatorId::from(Sr25519Keyring::Bob.public()), -// ValidatorId::from(Sr25519Keyring::Charlie.public()), -// ValidatorId::from(Sr25519Keyring::Dave.public()), -// ValidatorId::from(Sr25519Keyring::Eve.public()), -// ], -// ..Default::default() -// }), -// _ => None, -// }); -// -// // add a couple of parathread claims now that the parathreads are live. -// SchedulerParathreads::add_parathread_claim(ParathreadClaim(thread_a, collator.clone())); -// SchedulerParathreads::add_parathread_claim(ParathreadClaim(thread_c, collator.clone())); -// -// run_to_block(2, |_| None); -// -// assert_eq!(Scheduler::scheduled().len(), 4); -// -// // cores 0, 1, 2, and 3 should be occupied. mark them as such. 
-// Scheduler::occupied(&[CoreIndex(0), CoreIndex(1), CoreIndex(2), CoreIndex(3)]); -// -// { -// let cores = AvailabilityCores::::get(); -// -// assert!(cores[0].is_some()); -// assert!(cores[1].is_some()); -// assert!(cores[2].is_some()); -// assert!(cores[3].is_some()); -// assert!(cores[4].is_none()); -// -// assert!(Scheduler::scheduled().is_empty()); -// } -// -// // add a couple more parathread claims - the claim on `b` will go to the 3rd parathread core (4) -// // and the claim on `d` will go back to the 1st parathread core (2). The claim on `e` then -// // will go for core `3`. -// SchedulerParathreads::add_parathread_claim(ParathreadClaim(thread_b, collator.clone())); -// SchedulerParathreads::add_parathread_claim(ParathreadClaim(thread_d, collator.clone())); -// SchedulerParathreads::add_parathread_claim(ParathreadClaim(thread_e, collator.clone())); -// -// run_to_block(3, |_| None); -// -// { -// let scheduled = Scheduler::scheduled(); -// -// // cores 0 and 1 are occupied by parachains. cores 2 and 3 are occupied by parathread -// // claims. core 4 was free. -// assert_eq!(scheduled.len(), 1); -// assert_eq!( -// scheduled[0], -// CoreAssignment { -// core: CoreIndex(4), -// para_id: thread_b, -// kind: Assignment::Parathread(collator.clone(), 0), -// group_idx: GroupIndex(4), -// } -// ); -// } -// -// // now note that cores 0, 2, and 3 were freed. -// Scheduler::schedule( -// vec![ -// (CoreIndex(0), FreedReason::Concluded), -// (CoreIndex(2), FreedReason::Concluded), -// (CoreIndex(3), FreedReason::TimedOut), // should go back on queue. -// ] -// .into_iter() -// .collect(), -// 3, -// ); -// -// { -// let scheduled = Scheduler::scheduled(); -// -// // 1 thing scheduled before, + 3 cores freed. 
-// assert_eq!(scheduled.len(), 4); -// assert_eq!( -// scheduled[0], -// CoreAssignment { -// core: CoreIndex(0), -// para_id: chain_a, -// kind: Assignment::Parachain, -// group_idx: GroupIndex(0), -// } -// ); -// assert_eq!( -// scheduled[1], -// CoreAssignment { -// core: CoreIndex(2), -// para_id: thread_d, -// kind: Assignment::Parathread(collator.clone(), 0), -// group_idx: GroupIndex(2), -// } -// ); -// assert_eq!( -// scheduled[2], -// CoreAssignment { -// core: CoreIndex(3), -// para_id: thread_e, -// kind: Assignment::Parathread(collator.clone(), 0), -// group_idx: GroupIndex(3), -// } -// ); -// assert_eq!( -// scheduled[3], -// CoreAssignment { -// core: CoreIndex(4), -// para_id: thread_b, -// kind: Assignment::Parathread(collator.clone(), 0), -// group_idx: GroupIndex(4), -// } -// ); -// -// // the prior claim on thread A concluded, but the claim on thread C was marked as -// // timed out. -// let index = ParathreadClaimIndex::::get(); -// let parathread_queue = ParathreadQueue::::get(); -// -// // thread A claim should have been wiped, but thread C claim should remain. -// assert_eq!(index, vec![thread_b, thread_c, thread_d, thread_e]); -// -// // Although C was descheduled, the core `4` was occupied so C goes back on the queue. -// assert_eq!(parathread_queue.queue.len(), 1); -// assert_eq!( -// parathread_queue.queue[0], -// QueuedParathread { -// claim: ParathreadEntry { -// claim: ParathreadClaim(thread_c, collator.clone()), -// retries: 0, // retries not incremented by timeout - validators' fault. -// }, -// core_offset: 2, // reassigned to next core. thread_e claim was on offset 1. -// } -// ); -// } -// }); -//} +#[test] +fn schedule_schedules_including_just_freed() { + let mut config = default_config(); + // NOTE: This test expects on demand cores to each get slotted on to a different core + // and not fill up the claimqueue of each core first. 
+ config.scheduling_lookahead = 1; + let genesis_config = genesis_config(&config); + + let chain_a = ParaId::from(1_u32); + let chain_b = ParaId::from(2_u32); + + let thread_a = ParaId::from(3_u32); + let thread_b = ParaId::from(4_u32); + let thread_c = ParaId::from(5_u32); + let thread_d = ParaId::from(6_u32); + let thread_e = ParaId::from(7_u32); + + let assignment_a = Assignment { para_id: thread_a }; + let assignment_b = Assignment { para_id: thread_b }; + let assignment_c = Assignment { para_id: thread_c }; + let assignment_d = Assignment { para_id: thread_d }; + let assignment_e = Assignment { para_id: thread_e }; + + new_test_ext(genesis_config).execute_with(|| { + assert_eq!(default_config().on_demand_cores, 3); + + // register 2 parachains + schedule_blank_para(chain_a, ParaKind::Parachain); + schedule_blank_para(chain_b, ParaKind::Parachain); + + // and 5 parathreads + schedule_blank_para(thread_a, ParaKind::Parathread); + schedule_blank_para(thread_b, ParaKind::Parathread); + schedule_blank_para(thread_c, ParaKind::Parathread); + schedule_blank_para(thread_d, ParaKind::Parathread); + schedule_blank_para(thread_e, ParaKind::Parathread); + + // start a new session to activate, 5 validators for 5 cores. + run_to_block(1, |number| match number { + 1 => Some(SessionChangeNotification { + new_config: default_config(), + validators: vec![ + ValidatorId::from(Sr25519Keyring::Alice.public()), + ValidatorId::from(Sr25519Keyring::Bob.public()), + ValidatorId::from(Sr25519Keyring::Charlie.public()), + ValidatorId::from(Sr25519Keyring::Dave.public()), + ValidatorId::from(Sr25519Keyring::Eve.public()), + ], + ..Default::default() + }), + _ => None, + }); + + // add a couple of parathread claims now that the parathreads are live. 
+ assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_a, + QueuePushDirection::Back + )); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_c, + QueuePushDirection::Back + )); + + let mut now = 2; + run_to_block(now, |_| None); + + assert_eq!(Scheduler::scheduled_claimqueue(now).len(), 4); + + // cores 0, 1, 2, and 3 should be occupied. mark them as such. + let mut occupied_map: BTreeMap = BTreeMap::new(); + occupied_map.insert(CoreIndex(0), chain_a); + occupied_map.insert(CoreIndex(1), chain_b); + occupied_map.insert(CoreIndex(2), thread_a); + occupied_map.insert(CoreIndex(3), thread_c); + Scheduler::occupied(occupied_map); + + { + let cores = AvailabilityCores::::get(); + + // cores 0, 1, 2, and 3 are all `CoreOccupied::Paras(ParasEntry...)` + assert!(cores[0] != CoreOccupied::Free); + assert!(cores[1] != CoreOccupied::Free); + assert!(cores[2] != CoreOccupied::Free); + assert!(cores[3] != CoreOccupied::Free); + + // core 4 is free + assert!(cores[4] == CoreOccupied::Free); + + assert!(Scheduler::scheduled_claimqueue(now).is_empty()); + + // All core index entries in the claimqueue should have `None` in them. + Scheduler::claimqueue().iter().for_each(|(_core_idx, core_queue)| { + assert!(core_queue.iter().all(|claim| claim.is_none())) + }) + } + + // add a couple more parathread claims - the claim on `b` will go to the 3rd parathread core (4) + // and the claim on `d` will go back to the 1st parathread core (2). The claim on `e` then + // will go for core `3`. 
+ assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_b, + QueuePushDirection::Back + )); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_d, + QueuePushDirection::Back + )); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_e.clone(), + QueuePushDirection::Back + )); + now = 3; + run_to_block(now, |_| None); + + { + let scheduled = Scheduler::scheduled_claimqueue(now); + + // cores 0 and 1 are occupied by parachains. cores 2 and 3 are occupied by parathread + // claims. core 4 was free. + assert_eq!(scheduled.len(), 1); + assert_eq!( + scheduled[0], + CoreAssignment { + core: CoreIndex(4), + paras_entry: ParasEntry { + assignment: Assignment { para_id: thread_b }, + availability_timeouts: 0, + ttl: 8 + }, + group_idx: GroupIndex(4), + } + ); + } + + // now note that cores 0, 2, and 3 were freed. + let just_updated: BTreeMap = vec![ + (CoreIndex(0), FreedReason::Concluded), + (CoreIndex(2), FreedReason::Concluded), + (CoreIndex(3), FreedReason::TimedOut), // should go back on queue. + ] + .into_iter() + .collect(); + Scheduler::update_claimqueue(just_updated, now); + + { + let scheduled = Scheduler::scheduled_claimqueue(now); + + // 1 thing scheduled before, + 3 cores freed. + assert_eq!(scheduled.len(), 4); + assert_eq!( + scheduled[0], + CoreAssignment { + core: CoreIndex(0), + paras_entry: ParasEntry { + assignment: Assignment { para_id: chain_a }, + availability_timeouts: 0, + ttl: 8 + }, + group_idx: GroupIndex(0), + } + ); + assert_eq!( + scheduled[1], + CoreAssignment { + core: CoreIndex(2), + paras_entry: ParasEntry { + assignment: Assignment { para_id: thread_d }, + availability_timeouts: 0, + ttl: 8 + }, + group_idx: GroupIndex(2), + } + ); + // Although C was descheduled, the core `4` was occupied so C goes back to the queue. 
+ assert_eq!( + scheduled[2], + CoreAssignment { + core: CoreIndex(3), + paras_entry: ParasEntry { + assignment: Assignment { para_id: thread_c }, + availability_timeouts: 1, + ttl: 8 + }, + group_idx: GroupIndex(3), + } + ); + assert_eq!( + scheduled[3], + CoreAssignment { + core: CoreIndex(4), + paras_entry: ParasEntry { + assignment: Assignment { para_id: thread_b }, + availability_timeouts: 0, + ttl: 8 + }, + group_idx: GroupIndex(4), + } + ); + + // The only assignment yet to be popped on to the claim queue is `thread_e`. + // This is due to `thread_c` timing out. + let order_queue = OnDemandAssigner::get_queue(); + assert!(order_queue.len() == 1); + assert!(order_queue[0] == assignment_e); + + // Chain B's core was not marked concluded or timed out, it should be on an + // availability core + assert!(availability_cores_contains_para_ids::(vec![chain_b])); + // Thread A claim should have been wiped, but thread C claim should remain. + assert!(!claimqueue_contains_para_ids::(vec![thread_a])); + assert!(claimqueue_contains_para_ids::(vec![thread_c])); + assert!(!availability_cores_contains_para_ids::(vec![thread_a, thread_c])); + } + }); +} #[test] fn schedule_clears_availability_cores() { let mut config = default_config(); config.scheduling_lookahead = 1; - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { config, ..Default::default() }, - ..Default::default() - }; + let genesis_config = genesis_config(&config); let chain_a = ParaId::from(1_u32); let chain_b = ParaId::from(2_u32); let chain_c = ParaId::from(3_u32); new_test_ext(genesis_config).execute_with(|| { - assert_eq!(default_config().parathread_cores, 3); + assert_eq!(default_config().on_demand_cores, 3); // register 3 parachains schedule_blank_para(chain_a, ParaKind::Parachain); @@ -749,41 +820,33 @@ fn schedule_clears_availability_cores() { assert_eq!(cores[1].is_free(), false); assert_eq!(cores[2].is_free(), false); - 
assert!(Scheduler::claimqueue_contains_only_none()); + assert!(claimqueue_contains_only_none()); } run_to_block(3, |_| None); // now note that cores 0 and 2 were freed. - Scheduler::fill_claimqueue( + Scheduler::free_cores_and_fill_claimqueue( vec![(CoreIndex(0), FreedReason::Concluded), (CoreIndex(2), FreedReason::Concluded)] .into_iter() - .collect(), + .collect::>(), 3, ); { let claimqueue = Scheduler::claimqueue(); - let claimqueue_0 = claimqueue.get(&CoreIndex(0)).unwrap().clone(); let claimqueue_2 = claimqueue.get(&CoreIndex(2)).unwrap().clone(); + let entry_ttl = 8; assert_eq!(claimqueue_0.len(), 1); assert_eq!(claimqueue_2.len(), 1); assert_eq!( claimqueue_0, - vec![Some(CoreAssignment { - core: CoreIndex(0), - kind: Assignment::Parachain(chain_a), - group_idx: GroupIndex(0), - })] + vec![Some(ParasEntry::new(Assignment::new(chain_a), entry_ttl))], ); assert_eq!( claimqueue_2, - vec![Some(CoreAssignment { - core: CoreIndex(2), - kind: Assignment::Parachain(chain_c), - group_idx: GroupIndex(2), - })] + vec![Some(ParasEntry::new(Assignment::new(chain_c), entry_ttl))], ); // The freed cores should be `Free` in `AvailabilityCores`. 
@@ -794,160 +857,245 @@ fn schedule_clears_availability_cores() { }); } -//#[test] -//fn schedule_rotates_groups() { -// let config = { -// let mut config = default_config(); -// -// // make sure parathread requests don't retry-out -// config.parathread_retries = config.group_rotation_frequency * 3; -// config.parathread_cores = 2; -// config -// }; -// -// let rotation_frequency = config.group_rotation_frequency; -// let parathread_cores = config.parathread_cores; -// -// let genesis_config = MockGenesisConfig { -// configuration: crate::configuration::GenesisConfig { -// config: config.clone(), -// ..Default::default() -// }, -// ..Default::default() -// }; -// -// let thread_a = ParaId::from(1_u32); -// let thread_b = ParaId::from(2_u32); -// -// let collator = CollatorId::from(Sr25519Keyring::Alice.public()); -// -// new_test_ext(genesis_config).execute_with(|| { -// assert_eq!(default_config().parathread_cores, 3); -// -// schedule_blank_para(thread_a, ParaKind::Parathread); -// schedule_blank_para(thread_b, ParaKind::Parathread); -// -// // start a new session to activate, 5 validators for 5 cores. 
-// run_to_block(1, |number| match number { -// 1 => Some(SessionChangeNotification { -// new_config: config.clone(), -// validators: vec![ -// ValidatorId::from(Sr25519Keyring::Alice.public()), -// ValidatorId::from(Sr25519Keyring::Eve.public()), -// ], -// ..Default::default() -// }), -// _ => None, -// }); -// -// let session_start_block = ::SessionStartBlock::get(); -// assert_eq!(session_start_block, 1); -// -// SchedulerParathreads::add_parathread_claim(ParathreadClaim(thread_a, collator.clone())); -// SchedulerParathreads::add_parathread_claim(ParathreadClaim(thread_b, collator.clone())); -// -// run_to_block(2, |_| None); -// -// let assert_groups_rotated = |rotations: u32| { -// let scheduled = Scheduler::scheduled(); -// assert_eq!(scheduled.len(), 2); -// assert_eq!(scheduled[0].group_idx, GroupIndex((0u32 + rotations) % parathread_cores)); -// assert_eq!(scheduled[1].group_idx, GroupIndex((1u32 + rotations) % parathread_cores)); -// }; -// -// assert_groups_rotated(0); -// -// // one block before first rotation. -// run_to_block(rotation_frequency, |_| None); -// -// assert_groups_rotated(0); -// -// // first rotation. -// run_to_block(rotation_frequency + 1, |_| None); -// assert_groups_rotated(1); -// -// // one block before second rotation. -// run_to_block(rotation_frequency * 2, |_| None); -// assert_groups_rotated(1); -// -// // second rotation. 
-// run_to_block(rotation_frequency * 2 + 1, |_| None); -// assert_groups_rotated(2); -// }); -//} - -//#[test] -//fn parathread_claims_are_pruned_after_retries() { -// let max_retries = default_config().parathread_retries; -// -// let genesis_config = MockGenesisConfig { -// configuration: crate::configuration::GenesisConfig { -// config: default_config(), -// ..Default::default() -// }, -// ..Default::default() -// }; -// -// let thread_a = ParaId::from(1_u32); -// let thread_b = ParaId::from(2_u32); -// -// let collator = CollatorId::from(Sr25519Keyring::Alice.public()); -// -// new_test_ext(genesis_config).execute_with(|| { -// assert_eq!(default_config().parathread_cores, 3); -// -// schedule_blank_para(thread_a, ParaKind::Parathread); -// schedule_blank_para(thread_b, ParaKind::Parathread); -// -// // start a new session to activate, 5 validators for 5 cores. -// run_to_block(1, |number| match number { -// 1 => Some(SessionChangeNotification { -// new_config: default_config(), -// validators: vec![ -// ValidatorId::from(Sr25519Keyring::Alice.public()), -// ValidatorId::from(Sr25519Keyring::Eve.public()), -// ], -// ..Default::default() -// }), -// _ => None, -// }); -// -// SchedulerParathreads::add_parathread_claim(ParathreadClaim(thread_a, collator.clone())); -// SchedulerParathreads::add_parathread_claim(ParathreadClaim(thread_b, collator.clone())); -// -// run_to_block(2, |_| None); -// assert_eq!(Scheduler::scheduled().len(), 2); -// -// run_to_block(2 + max_retries, |_| None); -// assert_eq!(Scheduler::scheduled().len(), 2); -// -// run_to_block(2 + max_retries + 1, |_| None); -// assert_eq!(Scheduler::scheduled().len(), 0); -// }); -//} +#[test] +fn schedule_rotates_groups() { + let config = { + let mut config = default_config(); + + // make sure on demand requests don't retry-out + config.on_demand_retries = config.group_rotation_frequency * 3; + config.on_demand_cores = 2; + config.scheduling_lookahead = 1; + config + }; + + let rotation_frequency = 
config.group_rotation_frequency; + let on_demand_cores = config.on_demand_cores; + + let genesis_config = genesis_config(&config); + + let thread_a = ParaId::from(1_u32); + let thread_b = ParaId::from(2_u32); + + let assignment_a = Assignment { para_id: thread_a }; + let assignment_b = Assignment { para_id: thread_b }; + + new_test_ext(genesis_config).execute_with(|| { + assert_eq!(default_config().on_demand_cores, 3); + + schedule_blank_para(thread_a, ParaKind::Parathread); + schedule_blank_para(thread_b, ParaKind::Parathread); + + // start a new session to activate, 5 validators for 5 cores. + run_to_block(1, |number| match number { + 1 => Some(SessionChangeNotification { + new_config: config.clone(), + validators: vec![ + ValidatorId::from(Sr25519Keyring::Alice.public()), + ValidatorId::from(Sr25519Keyring::Eve.public()), + ], + ..Default::default() + }), + _ => None, + }); + + let session_start_block = Scheduler::session_start_block(); + assert_eq!(session_start_block, 1); + + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_a, + QueuePushDirection::Back + )); + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_b, + QueuePushDirection::Back + )); + + let mut now = 2; + run_to_block(now, |_| None); + + let assert_groups_rotated = |rotations: u32, now: &BlockNumberFor| { + let scheduled = Scheduler::scheduled_claimqueue(now.clone()); + assert_eq!(scheduled.len(), 2); + assert_eq!(scheduled[0].group_idx, GroupIndex((0u32 + rotations) % on_demand_cores)); + assert_eq!(scheduled[1].group_idx, GroupIndex((1u32 + rotations) % on_demand_cores)); + }; + + assert_groups_rotated(0, &now); + + // one block before first rotation. + now = rotation_frequency; + run_to_block(rotation_frequency, |_| None); + + assert_groups_rotated(0, &now); + + // first rotation. + now = now + 1; + run_to_block(now, |_| None); + assert_groups_rotated(1, &now); + + // one block before second rotation. 
+ now = rotation_frequency * 2; + run_to_block(now, |_| None); + assert_groups_rotated(1, &now); + + // second rotation. + now = now + 1; + run_to_block(now, |_| None); + assert_groups_rotated(2, &now); + }); +} + +#[test] +fn on_demand_claims_are_pruned_after_timing_out() { + let max_retries = 20; + let mut config = default_config(); + config.scheduling_lookahead = 1; + config.on_demand_cores = 2; + config.on_demand_retries = max_retries; + let genesis_config = genesis_config(&config); + + let thread_a = ParaId::from(1_u32); + + let assignment_a = Assignment { para_id: thread_a }; + + new_test_ext(genesis_config).execute_with(|| { + schedule_blank_para(thread_a, ParaKind::Parathread); + + // #1 + let mut now = 1; + run_to_block(now, |number| match number { + 1 => Some(SessionChangeNotification { + new_config: default_config(), + validators: vec![ + ValidatorId::from(Sr25519Keyring::Alice.public()), + ValidatorId::from(Sr25519Keyring::Eve.public()), + ], + ..Default::default() + }), + _ => None, + }); + + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_a.clone(), + QueuePushDirection::Back + )); + + // #2 + now += 1; + run_to_block(now, |_| None); + assert_eq!(Scheduler::claimqueue().len(), 1); + // ParaId a is in the claimqueue. + assert!(claimqueue_contains_para_ids::(vec![thread_a])); + + Scheduler::occupied(vec![(CoreIndex(0), thread_a)].into_iter().collect()); + // ParaId a is no longer in the claimqueue. + assert!(!claimqueue_contains_para_ids::(vec![thread_a])); + // It is in availability cores. + assert!(availability_cores_contains_para_ids::(vec![thread_a])); + + // #3 + now += 1; + // Run to block #n over the max_retries value. + // In this case, both validator groups with time out on availability and + // the assignment will be dropped. + for n in now..=(now + max_retries + 1) { + // #n + run_to_block(n, |_| None); + // Time out on core 0. 
+ let just_updated: BTreeMap = vec![ + (CoreIndex(0), FreedReason::TimedOut), // should go back on queue. + ] + .into_iter() + .collect(); + let core_assignments = Scheduler::update_claimqueue(just_updated, now); + + // ParaId a exists in the claim queue until max_retries is reached. + if n < max_retries + now { + assert!(claimqueue_contains_para_ids::(vec![thread_a])); + } else { + assert!(!claimqueue_contains_para_ids::(vec![thread_a])); + } + + // Occupy the cores based on the output of update_claimqueue. + Scheduler::occupied( + core_assignments + .iter() + .map(|core_assignment| (core_assignment.core, core_assignment.para_id())) + .collect(), + ); + } + + // ParaId a does not exist in the claimqueue/availability_cores after + // threshold has been reached. + assert!(!claimqueue_contains_para_ids::(vec![thread_a])); + assert!(!availability_cores_contains_para_ids::(vec![thread_a])); + + // #25 + now += max_retries + 2; + + // Add assignment back to the mix. + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_a.clone(), + QueuePushDirection::Back + )); + run_to_block(now, |_| None); + + assert!(claimqueue_contains_para_ids::(vec![thread_a])); + + // #26 + now += 1; + // Run to block #n but this time have group 1 conclude the availabilty. + for n in now..=(now + max_retries + 1) { + // #n + run_to_block(n, |_| None); + // Time out core 0 if group 0 is assigned to it, if group 1 is assigned, conclude. + let mut just_updated: BTreeMap = BTreeMap::new(); + if let Some(group) = Scheduler::group_assigned_to_core(CoreIndex(0), n) { + match group { + GroupIndex(0) => { + just_updated.insert(CoreIndex(0), FreedReason::TimedOut); // should go back on queue. + }, + GroupIndex(1) => { + just_updated.insert(CoreIndex(0), FreedReason::Concluded); + }, + _ => panic!("Should only have 2 groups here"), + } + } + + let core_assignments = Scheduler::update_claimqueue(just_updated, now); + + // ParaId a exists in the claim queue until groups are rotated. 
+ if n < 31 { + assert!(claimqueue_contains_para_ids::(vec![thread_a])); + } else { + assert!(!claimqueue_contains_para_ids::(vec![thread_a])); + } + + // Occupy the cores based on the output of update_claimqueue. + Scheduler::occupied( + core_assignments + .iter() + .map(|core_assignment| (core_assignment.core, core_assignment.para_id())) + .collect(), + ); + } + + // ParaId a does not exist in the claimqueue/availability_cores after + // being concluded + assert!(!claimqueue_contains_para_ids::(vec![thread_a])); + assert!(!availability_cores_contains_para_ids::(vec![thread_a])); + }); +} #[test] fn availability_predicate_works() { - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: default_config(), - ..Default::default() - }, - ..Default::default() - }; + let genesis_config = genesis_config(&default_config()); - let HostConfiguration { - group_rotation_frequency, - chain_availability_period, - thread_availability_period, - .. - } = default_config(); - //let collator = CollatorId::from(Sr25519Keyring::Alice.public()); + let HostConfiguration { group_rotation_frequency, paras_availability_period, .. } = + default_config(); - assert!( - chain_availability_period < thread_availability_period && - thread_availability_period < group_rotation_frequency - ); + assert!(paras_availability_period < group_rotation_frequency); let chain_a = ParaId::from(1_u32); let thread_a = ParaId::from(2_u32); @@ -956,7 +1104,7 @@ fn availability_predicate_works() { schedule_blank_para(chain_a, ParaKind::Parachain); schedule_blank_para(thread_a, ParaKind::Parathread); - // start a new session with our chain & thread registered. + // start a new session with our chain registered. run_to_block(1, |number| match number { 1 => Some(SessionChangeNotification { new_config: default_config(), @@ -974,16 +1122,17 @@ fn availability_predicate_works() { // assign some availability cores. 
{ + let entry_ttl = 10_000; AvailabilityCores::::mutate(|cores| { - cores[0] = CoreOccupied::Parachain(chain_a); - // cores[1] = CoreOccupied::Parathread(ParathreadEntry { - // claim: ParathreadClaim(thread_a, Some(collator)), - // retries: 0, - // }) + cores[0] = + CoreOccupied::Paras(ParasEntry::new(Assignment::new(chain_a), entry_ttl)); + cores[1] = + CoreOccupied::Paras(ParasEntry::new(Assignment::new(thread_a), entry_ttl)); }); } - run_to_block(1 + thread_availability_period, |_| None); + run_to_block(1 + paras_availability_period, |_| None); + assert!(Scheduler::availability_timeout_predicate().is_none()); run_to_block(1 + group_rotation_frequency, |_| None); @@ -993,208 +1142,180 @@ fn availability_predicate_works() { .expect("predicate exists recently after rotation"); let now = System::block_number(); - let would_be_timed_out = now - thread_availability_period; + let would_be_timed_out = now - paras_availability_period; for i in 0..AvailabilityCores::::get().len() { // returns true for unoccupied cores. - // And can time out both threads and chains at this stage. + // And can time out paras at this stage. assert!(pred(CoreIndex(i as u32), would_be_timed_out)); } - assert!(!pred(CoreIndex(0), now)); // assigned: chain - //assert!(!pred(CoreIndex(1), now)); // assigned: thread + assert!(!pred(CoreIndex(0), now)); + assert!(!pred(CoreIndex(1), now)); assert!(pred(CoreIndex(2), now)); - // check the tighter bound on chains vs threads. - assert!(pred(CoreIndex(0), now - chain_availability_period)); - //assert!(!pred(CoreIndex(1), now - chain_availability_period)); + // check the tight bound. + assert!(pred(CoreIndex(0), now - paras_availability_period)); + assert!(pred(CoreIndex(1), now - paras_availability_period)); // check the threshold is exact. 
- assert!(!pred(CoreIndex(0), now - chain_availability_period + 1)); - //assert!(!pred(CoreIndex(1), now - thread_availability_period + 1)); + assert!(!pred(CoreIndex(0), now - paras_availability_period + 1)); + assert!(!pred(CoreIndex(1), now - paras_availability_period + 1)); } - run_to_block(1 + group_rotation_frequency + chain_availability_period, |_| None); + run_to_block(1 + group_rotation_frequency + paras_availability_period, |_| None); + }); +} + +#[test] +fn next_up_on_available_uses_next_scheduled_or_none_for_thread() { + let mut config = default_config(); + config.on_demand_cores = 1; + + let genesis_config = genesis_config(&config); + + let thread_a = ParaId::from(1_u32); + let thread_b = ParaId::from(2_u32); + + new_test_ext(genesis_config).execute_with(|| { + schedule_blank_para(thread_a, ParaKind::Parathread); + schedule_blank_para(thread_b, ParaKind::Parathread); + + // start a new session to activate, 5 validators for 5 cores. + run_to_block(1, |number| match number { + 1 => Some(SessionChangeNotification { + new_config: config.clone(), + validators: vec![ + ValidatorId::from(Sr25519Keyring::Alice.public()), + ValidatorId::from(Sr25519Keyring::Eve.public()), + ], + ..Default::default() + }), + _ => None, + }); + + let thread_entry_a = ParasEntry { + assignment: Assignment { para_id: thread_a }, + availability_timeouts: 0, + ttl: 5, + }; + let thread_entry_b = ParasEntry { + assignment: Assignment { para_id: thread_b }, + availability_timeouts: 0, + ttl: 5, + }; + + Scheduler::add_to_claimqueue(CoreIndex(0), thread_entry_a.clone()); + + run_to_block(2, |_| None); { - let pred = Scheduler::availability_timeout_predicate() - .expect("predicate exists recently after rotation"); + assert_eq!(Scheduler::claimqueue_len(), 1); + assert_eq!(Scheduler::availability_cores().len(), 1); - let would_be_timed_out = System::block_number() - thread_availability_period; + let mut map = BTreeMap::new(); + map.insert(CoreIndex(0), thread_a); + 
Scheduler::occupied(map); - assert!(!pred(CoreIndex(0), would_be_timed_out)); // chains can't be timed out now. - assert!(pred(CoreIndex(1), would_be_timed_out)); // but threads can. - } + let cores = Scheduler::availability_cores(); + match &cores[0] { + CoreOccupied::Paras(entry) => assert_eq!(entry, &thread_entry_a), + _ => panic!("with no chains, only core should be a thread core"), + } - run_to_block(1 + group_rotation_frequency + thread_availability_period, |_| None); + assert!(Scheduler::next_up_on_available(CoreIndex(0)).is_none()); - assert!(Scheduler::availability_timeout_predicate().is_none()); + Scheduler::add_to_claimqueue(CoreIndex(0), thread_entry_b); + + assert_eq!( + Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), + ScheduledCore { para_id: thread_b, collator: None } + ); + } }); } -//#[test] -//fn next_up_on_available_uses_next_scheduled_or_none_for_thread() { -// let mut config = default_config(); -// config.parathread_cores = 1; -// -// let genesis_config = MockGenesisConfig { -// configuration: crate::configuration::GenesisConfig { -// config: config.clone(), -// ..Default::default() -// }, -// ..Default::default() -// }; -// -// let thread_a = ParaId::from(1_u32); -// let thread_b = ParaId::from(2_u32); -// -// let collator = CollatorId::from(Sr25519Keyring::Alice.public()); -// -// new_test_ext(genesis_config).execute_with(|| { -// schedule_blank_para(thread_a, ParaKind::Parathread); -// schedule_blank_para(thread_b, ParaKind::Parathread); -// -// // start a new session to activate, 5 validators for 5 cores. 
-// run_to_block(1, |number| match number { -// 1 => Some(SessionChangeNotification { -// new_config: config.clone(), -// validators: vec![ -// ValidatorId::from(Sr25519Keyring::Alice.public()), -// ValidatorId::from(Sr25519Keyring::Eve.public()), -// ], -// ..Default::default() -// }), -// _ => None, -// }); -// -// let thread_claim_a = ParathreadClaim(thread_a, collator.clone()); -// let thread_claim_b = ParathreadClaim(thread_b, collator.clone()); -// -// SchedulerParathreads::add_parathread_claim(thread_claim_a.clone()); -// -// run_to_block(2, |_| None); -// -// { -// assert_eq!(Scheduler::scheduled().len(), 1); -// assert_eq!(Scheduler::availability_cores().len(), 1); -// -// Scheduler::occupied(&[CoreIndex(0)]); -// -// let cores = Scheduler::availability_cores(); -// match cores[0].as_ref().unwrap() { -// CoreOccupied::Parathread(entry) => assert_eq!(entry.claim, thread_claim_a), -// _ => panic!("with no chains, only core should be a thread core"), -// } -// -// assert!(Scheduler::next_up_on_available(CoreIndex(0)).is_none()); -// -// SchedulerParathreads::add_parathread_claim(thread_claim_b); -// -// let queue = ParathreadQueue::::get(); -// assert_eq!( -// queue.get_next_on_core(0).unwrap().claim, -// ParathreadClaim(thread_b, collator.clone()), -// ); -// -// assert_eq!( -// Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), -// ScheduledCore { para_id: thread_b, collator: Some(collator.clone()) } -// ); -// } -// }); -//} - -//#[test] -//fn next_up_on_time_out_reuses_claim_if_nothing_queued() { -// let mut config = default_config(); -// config.parathread_cores = 1; -// -// let genesis_config = MockGenesisConfig { -// configuration: crate::configuration::GenesisConfig { -// config: config.clone(), -// ..Default::default() -// }, -// ..Default::default() -// }; -// -// let thread_a = ParaId::from(1_u32); -// let thread_b = ParaId::from(2_u32); -// -// let collator = CollatorId::from(Sr25519Keyring::Alice.public()); -// -// 
new_test_ext(genesis_config).execute_with(|| { -// schedule_blank_para(thread_a, ParaKind::Parathread); -// schedule_blank_para(thread_b, ParaKind::Parathread); -// -// // start a new session to activate, 5 validators for 5 cores. -// run_to_block(1, |number| match number { -// 1 => Some(SessionChangeNotification { -// new_config: config.clone(), -// validators: vec![ -// ValidatorId::from(Sr25519Keyring::Alice.public()), -// ValidatorId::from(Sr25519Keyring::Eve.public()), -// ], -// ..Default::default() -// }), -// _ => None, -// }); -// -// let thread_claim_a = ParathreadClaim(thread_a, collator.clone()); -// let thread_claim_b = ParathreadClaim(thread_b, collator.clone()); -// -// SchedulerParathreads::add_parathread_claim(thread_claim_a.clone()); -// -// run_to_block(2, |_| None); -// -// { -// assert_eq!(Scheduler::scheduled().len(), 1); -// assert_eq!(Scheduler::availability_cores().len(), 1); -// -// Scheduler::occupied(&[CoreIndex(0)]); -// -// let cores = Scheduler::availability_cores(); -// match cores[0].as_ref().unwrap() { -// CoreOccupied::Parathread(entry) => assert_eq!(entry.claim, thread_claim_a), -// _ => panic!("with no chains, only core should be a thread core"), -// } -// -// let queue = ParathreadQueue::::get(); -// assert!(queue.get_next_on_core(0).is_none()); -// assert_eq!( -// Scheduler::next_up_on_time_out(CoreIndex(0)).unwrap(), -// ScheduledCore { para_id: thread_a, collator: Some(collator.clone()) } -// ); -// -// SchedulerParathreads::add_parathread_claim(thread_claim_b); -// -// let queue = ParathreadQueue::::get(); -// assert_eq!( -// queue.get_next_on_core(0).unwrap().claim, -// ParathreadClaim(thread_b, collator.clone()), -// ); -// -// // Now that there is an earlier next-up, we use that. 
-// assert_eq!( -// Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), -// ScheduledCore { para_id: thread_b, collator: Some(collator.clone()) } -// ); -// } -// }); -//} +#[test] +fn next_up_on_time_out_reuses_claim_if_nothing_queued() { + let mut config = default_config(); + config.on_demand_cores = 1; -fn genesis_config(config: &HostConfiguration) -> MockGenesisConfig { - MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: config.clone(), - ..Default::default() - }, - ..Default::default() - } + let genesis_config = genesis_config(&config); + + let thread_a = ParaId::from(1_u32); + let thread_b = ParaId::from(2_u32); + + let assignment_a = Assignment { para_id: thread_a }; + let assignment_b = Assignment { para_id: thread_b }; + + new_test_ext(genesis_config).execute_with(|| { + schedule_blank_para(thread_a, ParaKind::Parathread); + schedule_blank_para(thread_b, ParaKind::Parathread); + + // start a new session to activate, 5 validators for 5 cores. + run_to_block(1, |number| match number { + 1 => Some(SessionChangeNotification { + new_config: config.clone(), + validators: vec![ + ValidatorId::from(Sr25519Keyring::Alice.public()), + ValidatorId::from(Sr25519Keyring::Eve.public()), + ], + ..Default::default() + }), + _ => None, + }); + + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_a.clone(), + QueuePushDirection::Back + )); + + run_to_block(2, |_| None); + + { + assert_eq!(Scheduler::claimqueue().len(), 1); + assert_eq!(Scheduler::availability_cores().len(), 1); + + let mut map = BTreeMap::new(); + map.insert(CoreIndex(0), thread_a); + Scheduler::occupied(map); + + let cores = Scheduler::availability_cores(); + match cores.get(0).unwrap() { + CoreOccupied::Paras(entry) => assert_eq!(entry.assignment, assignment_a.clone()), + _ => panic!("with no chains, only core should be a thread core"), + } + + // There's nothing more to pop for core 0 from the assignment provider. 
+ assert!( + OnDemandAssigner::pop_assignment_for_core(CoreIndex(0), Some(thread_a)).is_none() + ); + + assert_eq!( + Scheduler::next_up_on_time_out(CoreIndex(0)).unwrap(), + ScheduledCore { para_id: thread_a, collator: None } + ); + + assert_ok!(OnDemandAssigner::add_on_demand_assignment( + assignment_b.clone(), + QueuePushDirection::Back + )); + + // Pop assignment_b into the claimqueue + Scheduler::update_claimqueue(BTreeMap::new(), 2); + + //// Now that there is an earlier next-up, we use that. + assert_eq!( + Scheduler::next_up_on_available(CoreIndex(0)).unwrap(), + ScheduledCore { para_id: thread_b, collator: None } + ); + } + }); } #[test] fn next_up_on_available_is_parachain_always() { let mut config = default_config(); - config.parathread_cores = 0; + config.on_demand_cores = 0; let genesis_config = genesis_config(&config); let chain_a = ParaId::from(1_u32); @@ -1223,8 +1344,8 @@ fn next_up_on_available_is_parachain_always() { Scheduler::occupied(vec![(CoreIndex(0), chain_a)].into_iter().collect()); let cores = Scheduler::availability_cores(); - match cores[0] { - CoreOccupied::Parachain(_) => {}, + match &cores[0] { + CoreOccupied::Paras(pe) if pe.para_id() == chain_a => {}, _ => panic!("with no threads, only core should be a chain core"), } @@ -1240,15 +1361,9 @@ fn next_up_on_available_is_parachain_always() { #[test] fn next_up_on_time_out_is_parachain_always() { let mut config = default_config(); - config.parathread_cores = 0; + config.on_demand_cores = 0; - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: config.clone(), - ..Default::default() - }, - ..Default::default() - }; + let genesis_config = genesis_config(&config); let chain_a = ParaId::from(1_u32); @@ -1277,9 +1392,9 @@ fn next_up_on_time_out_is_parachain_always() { Scheduler::occupied(vec![(CoreIndex(0), chain_a)].into_iter().collect()); let cores = Scheduler::availability_cores(); - match cores[0] { - CoreOccupied::Parachain(_) => {}, 
- _ => panic!("with no threads, only core should be a chain core"), + match &cores[0] { + CoreOccupied::Paras(pe) if pe.para_id() == chain_a => {}, + _ => panic!("Core should be occupied by chain_a ParaId"), } // Now that there is an earlier next-up, we use that. @@ -1295,12 +1410,9 @@ fn next_up_on_time_out_is_parachain_always() { fn session_change_requires_reschedule_dropping_removed_paras() { let mut config = default_config(); config.scheduling_lookahead = 1; - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { config, ..Default::default() }, - ..Default::default() - }; + let genesis_config = genesis_config(&config); - assert_eq!(default_config().parathread_cores, 3); + assert_eq!(default_config().on_demand_cores, 3); new_test_ext(genesis_config).execute_with(|| { let chain_a = ParaId::from(1_u32); let chain_b = ParaId::from(2_u32); @@ -1327,12 +1439,10 @@ fn session_change_requires_reschedule_dropping_removed_paras() { _ => None, }); - // TODO: remove + ... once parathreads assigner is in use - assert_eq!(Scheduler::claimqueue().len() as u32 + default_config().parathread_cores, 5); + assert_eq!(Scheduler::claimqueue().len(), 2); let groups = ValidatorGroups::::get(); - // TODO: remove + ... 
once parathreads assigner is in use - assert_eq!(groups.len() as u32 + default_config().parathread_cores, 5); + assert_eq!(groups.len(), 5); assert_ok!(Paras::schedule_para_cleanup(chain_b)); run_to_end_of_block(2, |number| match number { @@ -1359,11 +1469,11 @@ fn session_change_requires_reschedule_dropping_removed_paras() { Scheduler::claimqueue(), vec![( CoreIndex(0), - vec![Some(CoreAssignment { - core: CoreIndex(0), - kind: Assignment::Parachain(chain_a), - group_idx: GroupIndex(0), - })] + vec![Some(ParasEntry::new( + Assignment::new(chain_a), + // At end of block 2 + config.on_demand_ttl + 2 + ))] .into_iter() .collect() )] @@ -1392,12 +1502,10 @@ fn session_change_requires_reschedule_dropping_removed_paras() { _ => None, }); - // TODO: remove + ... once parathreads assigner is in use - assert_eq!(Scheduler::claimqueue().len() as u32 + default_config().parathread_cores, 5); + assert_eq!(Scheduler::claimqueue().len(), 2); let groups = ValidatorGroups::::get(); - // TODO: remove + ... 
once parathreads assigner is in use - assert_eq!(groups.len() as u32 + default_config().parathread_cores, 5); + assert_eq!(groups.len(), 5); Scheduler::update_claimqueue(BTreeMap::new(), 4); @@ -1406,21 +1514,21 @@ fn session_change_requires_reschedule_dropping_removed_paras() { vec![ ( CoreIndex(0), - vec![Some(CoreAssignment { - core: CoreIndex(0), - kind: Assignment::Parachain(chain_a), - group_idx: GroupIndex(0), - })] + vec![Some(ParasEntry::new( + Assignment::new(chain_a), + // At block 3 + config.on_demand_ttl + 3 + ))] .into_iter() .collect() ), ( CoreIndex(1), - vec![Some(CoreAssignment { - core: CoreIndex(1), - kind: Assignment::Parachain(chain_b), - group_idx: GroupIndex(1), - })] + vec![Some(ParasEntry::new( + Assignment::new(chain_b), + // At block 3 + config.on_demand_ttl + 3 + ))] .into_iter() .collect() ), @@ -1430,62 +1538,3 @@ fn session_change_requires_reschedule_dropping_removed_paras() { ); }); } - -//#[test] -//fn parathread_claims_are_pruned_after_deregistration() { -// let genesis_config = MockGenesisConfig { -// configuration: crate::configuration::GenesisConfig { -// config: default_config(), -// ..Default::default() -// }, -// ..Default::default() -// }; -// -// let thread_a = ParaId::from(1_u32); -// let thread_b = ParaId::from(2_u32); -// -// let collator = CollatorId::from(Sr25519Keyring::Alice.public()); -// -// new_test_ext(genesis_config).execute_with(|| { -// assert_eq!(default_config().parathread_cores, 3); -// -// schedule_blank_para(thread_a, ParaKind::Parathread); -// schedule_blank_para(thread_b, ParaKind::Parathread); -// -// // start a new session to activate, 5 validators for 5 cores. 
-// run_to_block(1, |number| match number { -// 1 => Some(SessionChangeNotification { -// new_config: default_config(), -// validators: vec![ -// ValidatorId::from(Sr25519Keyring::Alice.public()), -// ValidatorId::from(Sr25519Keyring::Eve.public()), -// ], -// ..Default::default() -// }), -// _ => None, -// }); -// -// SchedulerParathreads::add_parathread_claim(ParathreadClaim(thread_a, collator.clone())); -// SchedulerParathreads::add_parathread_claim(ParathreadClaim(thread_b, collator.clone())); -// -// run_to_block(2, |_| None); -// assert_eq!(Scheduler::scheduled().len(), 2); -// -// assert_ok!(Paras::schedule_para_cleanup(thread_a)); -// -// // start a new session to activate, 5 validators for 5 cores. -// run_to_block(3, |number| match number { -// 3 => Some(SessionChangeNotification { -// new_config: default_config(), -// validators: vec![ -// ValidatorId::from(Sr25519Keyring::Alice.public()), -// ValidatorId::from(Sr25519Keyring::Eve.public()), -// ], -// ..Default::default() -// }), -// _ => None, -// }); -// -// assert_eq!(Scheduler::scheduled().len(), 1); -// }); -//} diff --git a/runtime/parachains/src/scheduler_common/mod.rs b/runtime/parachains/src/scheduler_common/mod.rs index 1f94c8f283ae..427fbd65a790 100644 --- a/runtime/parachains/src/scheduler_common/mod.rs +++ b/runtime/parachains/src/scheduler_common/mod.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. +// Copyright (C) Parity Technologies (UK) Ltd. // This file is part of Polkadot. // Polkadot is free software: you can redistribute it and/or modify @@ -36,13 +36,17 @@ //! over time. use frame_support::pallet_prelude::*; -use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ - CollatorId, CoreIndex, CoreOccupied, GroupIndex, Id as ParaId, ParathreadEntry, ScheduledCore, + v5::{Assignment, ParasEntry}, + CoreIndex, GroupIndex, Id as ParaId, }; use scale_info::TypeInfo; use sp_std::prelude::*; +// Only used to link to configuration documentation. 
+#[allow(unused)] +use crate::configuration::HostConfiguration; + /// Reasons a core might be freed #[derive(Clone, Copy)] pub enum FreedReason { @@ -52,103 +56,54 @@ pub enum FreedReason { TimedOut, } -#[derive(Clone, Encode, Decode, PartialEq, TypeInfo)] -#[cfg_attr(feature = "std", derive(Debug))] -pub enum Assignment { - Parachain(ParaId), - ParathreadA(ParathreadEntry), -} - -impl Assignment { - pub fn para_id(&self) -> ParaId { - match self { - Assignment::Parachain(para_id) => *para_id, - Assignment::ParathreadA(entry) => entry.claim.0, - } - } - - pub fn get_collator(&self) -> Option { - match self { - Assignment::Parachain(_) => None, - Assignment::ParathreadA(entry) => entry.claim.1.clone(), - } - } - - // Note: this happens on session change. We don't rescheduled pay-as-you-go parachains if they have been tried to run at least once - pub fn from_core_occupied(co: CoreOccupied) -> Option { - match co { - CoreOccupied::Parachain(para_id) => Some(Assignment::Parachain(para_id)), - CoreOccupied::Parathread(entry) => - if entry.retries > 0 { - None - } else { - Some(Assignment::ParathreadA(entry)) - }, - CoreOccupied::Free => None, - } - } - - pub fn to_core_assignment(self, core_idx: CoreIndex, group_idx: GroupIndex) -> CoreAssignment { - CoreAssignment { core: core_idx, group_idx, kind: self } - } - - pub fn from_core_assignment(ca: CoreAssignment) -> Assignment { - ca.kind - } -} - -pub trait AssignmentProvider { +pub trait AssignmentProvider { + /// How many cores are allocated to this provider. fn session_core_count() -> u32; - fn new_session(); - + /// Pops an [`Assignment`] from the provider for a specified [`CoreIndex`]. + /// The `concluded_para` field makes the caller report back to the provider + /// which [`ParaId`] it processed last on the supplied [`CoreIndex`]. fn pop_assignment_for_core( core_idx: CoreIndex, concluded_para: Option, ) -> Option; - // on session change + /// Push back an already popped assignment. 
Intended for provider implementations + /// that need to be able to keep track of assignments over session boundaries, + /// such as the on demand assignment provider. fn push_assignment_for_core(core_idx: CoreIndex, assignment: Assignment); - fn get_availability_period(core_idx: CoreIndex) -> BlockNumberFor; + /// Returns the availability period specified by the implementation. + /// See + /// [`HostConfiguration::paras_availability_period`] + /// for more information. + fn get_availability_period(core_idx: CoreIndex) -> BlockNumber; + /// How many times a collation can time out on availability. + /// Zero retries still means that a collation can be provided as per the slot auction assignment provider. fn get_max_retries(core_idx: CoreIndex) -> u32; } -/// How a free core is scheduled to be assigned. +/// How a core is mapped to a backing group and a `ParaId` #[derive(Clone, Encode, Decode, PartialEq, TypeInfo)] #[cfg_attr(feature = "std", derive(Debug))] -pub struct CoreAssignment { +pub struct CoreAssignment { /// The core that is assigned. pub core: CoreIndex, - /// The kind of the assignment. - pub kind: Assignment, + /// The para id and accompanying information needed to collate and back a parablock. + pub paras_entry: ParasEntry, /// The index of the validator group assigned to the core. pub group_idx: GroupIndex, } -impl CoreAssignment { - pub fn new(core: CoreIndex, kind: Assignment, group_idx: GroupIndex) -> Self { - CoreAssignment { core, kind, group_idx } - } - - /// Get the ID of a collator who is required to collate this block. - pub fn required_collator(&self) -> Option<&CollatorId> { - match &self.kind { - Assignment::Parachain(_) => None, - Assignment::ParathreadA(entry) => entry.claim.1.as_ref(), - } - } - - /// Get the `CoreOccupied` from this. 
- pub fn to_core_occupied(self) -> CoreOccupied { - match self.kind { - Assignment::Parachain(para_id) => CoreOccupied::Parachain(para_id), - Assignment::ParathreadA(entry) => CoreOccupied::Parathread(entry), - } +impl CoreAssignment { + /// Returns the [`ParaId`] of the assignment. + pub fn para_id(&self) -> ParaId { + self.paras_entry.para_id() } - pub fn to_scheduled_core(self) -> ScheduledCore { - ScheduledCore { para_id: self.kind.para_id(), collator: self.kind.get_collator() } + /// Returns the inner [`ParasEntry`] of the assignment. + pub fn to_paras_entry(self) -> ParasEntry { + self.paras_entry } } diff --git a/runtime/parachains/src/scheduler_polkadot/mod.rs b/runtime/parachains/src/scheduler_polkadot/mod.rs deleted file mode 100644 index 72e55a447783..000000000000 --- a/runtime/parachains/src/scheduler_polkadot/mod.rs +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . 
- -use frame_support::pallet_prelude::*; -use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{CoreIndex, Id as ParaId}; - -use crate::{ - configuration, paras, - scheduler_common::{Assignment, AssignmentProvider}, -}; - -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - - #[pallet::pallet] - #[pallet::without_storage_info] - pub struct Pallet(_); - - #[pallet::config] - pub trait Config: - frame_system::Config - + configuration::Config - + paras::Config - + crate::scheduler_parachains::Config - { - } - - #[pallet::storage] - pub(crate) type NumParachains = StorageValue<_, Option, ValueQuery>; -} - -impl AssignmentProvider for Pallet { - fn session_core_count() -> u32 { - >::session_core_count() - //+ >::config().parathread_cores - //crate::scheduler_parathreads::Pallet>::session_core_count() - } - - fn new_session() { - let n_parachains = >::session_core_count(); - NumParachains::::mutate(|val| *val = Some(n_parachains)); - } - - fn pop_assignment_for_core( - core_idx: CoreIndex, - concluded_para: Option, - ) -> Option { - let parachains_cores = >::session_core_count(); - if (0..parachains_cores).contains(&core_idx.0) { - >::pop_assignment_for_core( - core_idx, - concluded_para, - ) - } else { - let _core_idx = CoreIndex(core_idx.0 - parachains_cores); - todo!() - //>::pop_assignment_for_core(core_idx, concluded_para) - } - } - - fn push_assignment_for_core(core_idx: CoreIndex, assignment: Assignment) { - let parachain_cores = NumParachains::::get() - .unwrap_or_else(|| >::session_core_count()); - if (0..parachain_cores).contains(&core_idx.0) { - >::push_assignment_for_core(core_idx, assignment) - } else { - let _core_idx = CoreIndex(core_idx.0 - parachain_cores); - todo!() - //>::push_assignment_for_core( - // core_idx, assignment, - //) - } - } - - fn get_availability_period(core_idx: CoreIndex) -> BlockNumberFor { - let parachains_cores = >::session_core_count(); - if (0..parachains_cores).contains(&core_idx.0) { 
- >::get_availability_period(core_idx) - } else { - let _core_idx = CoreIndex(core_idx.0 - parachains_cores); - todo!() - //>::get_availability_period(core_idx) - } - } - - fn get_max_retries(core_idx: CoreIndex) -> u32 { - let parachains_cores = >::session_core_count(); - if (0..parachains_cores).contains(&core_idx.0) { - >::get_max_retries(core_idx) - } else { - let _core_idx = CoreIndex(core_idx.0 - parachains_cores); - todo!() - //>::get_max_retries(core_idx) - } - } -} diff --git a/runtime/parachains/src/session_info/tests.rs b/runtime/parachains/src/session_info/tests.rs index 63226fb7cf81..1a04f95140d0 100644 --- a/runtime/parachains/src/session_info/tests.rs +++ b/runtime/parachains/src/session_info/tests.rs @@ -62,7 +62,7 @@ fn run_to_block( fn default_config() -> HostConfiguration { HostConfiguration { - parathread_cores: 1, + on_demand_cores: 1, dispute_period: 2, needed_approvals: 3, ..Default::default() diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index ccdea55c7dd5..a42fa9f1c1c5 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -27,6 +27,7 @@ use runtime_common::{ }; use runtime_parachains::{ + assigner_parachains as parachains_assigner_parachains, configuration as parachains_configuration, disputes as parachains_disputes, disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, @@ -34,7 +35,7 @@ use runtime_parachains::{ initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points, runtime_api_impl::v5 as parachains_runtime_api_impl, - scheduler as parachains_scheduler, scheduler_polkadot, session_info as parachains_session_info, + scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -1182,12 +1183,13 @@ impl parachains_hrmp::Config for Runtime { impl 
parachains_paras_inherent::Config for Runtime { type WeightInfo = weights::runtime_parachains_paras_inherent::WeightInfo; } -impl runtime_parachains::scheduler_polkadot::Config for Runtime {} -impl runtime_parachains::scheduler_parachains::Config for Runtime {} + impl parachains_scheduler::Config for Runtime { - type AssignmentProvider = runtime_parachains::scheduler_polkadot::Pallet; + type AssignmentProvider = ParaAssignmentProvider; } +impl parachains_assigner_parachains::Config for Runtime {} + impl parachains_initializer::Config for Runtime { type Randomness = pallet_babe::RandomnessFromOneEpochAgo; type ForceOrigin = EnsureRoot; @@ -1352,7 +1354,6 @@ construct_runtime! { System: frame_system::{Pallet, Call, Storage, Config, Event} = 0, Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event} = 1, Preimage: pallet_preimage::{Pallet, Call, Storage, Event} = 10, - SchedulerPolkadot: scheduler_polkadot::{Pallet, Storage} = 41, // Babe must be before session. Babe: pallet_babe::{Pallet, Call, Storage, Config, ValidateUnsigned} = 2, @@ -1438,6 +1439,7 @@ construct_runtime! { ParaSessionInfo: parachains_session_info::{Pallet, Storage} = 61, ParasDisputes: parachains_disputes::{Pallet, Call, Storage, Event} = 62, ParasSlashing: parachains_slashing::{Pallet, Call, Storage, ValidateUnsigned} = 63, + ParaAssignmentProvider: parachains_assigner_parachains::{Pallet} = 64, // Parachain Onboarding Pallets. Start indices at 70 to leave room. 
Registrar: paras_registrar::{Pallet, Call, Storage, Event} = 70, diff --git a/runtime/polkadot/src/weights/runtime_parachains_configuration.rs b/runtime/polkadot/src/weights/runtime_parachains_configuration.rs index af8e4c111b20..cee8d6c42d53 100644 --- a/runtime/polkadot/src/weights/runtime_parachains_configuration.rs +++ b/runtime/polkadot/src/weights/runtime_parachains_configuration.rs @@ -150,4 +150,6 @@ impl runtime_parachains::configuration::WeightInfo for .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } + + fn set_config_with_perbill() -> Weight { todo!() } } diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index 13f9b4691ce4..338af8ab92ef 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -38,6 +38,8 @@ use scale_info::TypeInfo; use sp_std::{cmp::Ordering, collections::btree_map::BTreeMap, prelude::*}; use runtime_parachains::{ + assigner as parachains_assigner, assigner_on_demand as parachains_assigner_on_demand, + assigner_parachains as parachains_assigner_parachains, configuration as parachains_configuration, disputes as parachains_disputes, disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, @@ -45,8 +47,8 @@ use runtime_parachains::{ initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, runtime_api_impl::v5 as parachains_runtime_api_impl, - scheduler as parachains_scheduler, scheduler_parachains, - session_info as parachains_session_info, shared as parachains_shared, + scheduler as parachains_scheduler, session_info as parachains_session_info, + shared as parachains_shared, }; use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; @@ -77,7 +79,7 @@ use sp_runtime::{ Extrinsic as ExtrinsicT, Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, 
TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, KeyTypeId, Perbill, Percent, Permill, + ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, }; use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] @@ -879,6 +881,7 @@ pub enum ProxyType { CancelProxy, Auction, Society, + OnDemandOrdering, } impl Default for ProxyType { fn default() -> Self { @@ -965,6 +968,7 @@ impl InstanceFilter for ProxyType { RuntimeCall::Slots { .. } ), ProxyType::Society => matches!(c, RuntimeCall::Society(..)), + ProxyType::OnDemandOrdering => matches!(c, RuntimeCall::OnDemandAssignmentProvider(..)), } } fn is_superset(&self, o: &Self) -> bool { @@ -1095,9 +1099,26 @@ impl parachains_paras_inherent::Config for Runtime { type WeightInfo = weights::runtime_parachains_paras_inherent::WeightInfo; } -impl runtime_parachains::scheduler_parachains::Config for Runtime {} impl parachains_scheduler::Config for Runtime { - type AssignmentProvider = scheduler_parachains::Pallet; + type AssignmentProvider = ParaAssignmentProvider; +} + +parameter_types! { + pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1); +} + +impl parachains_assigner_on_demand::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type TrafficDefaultValue = OnDemandTrafficDefaultValue; + type WeightInfo = weights::runtime_parachains_assigner_on_demand::WeightInfo; +} + +impl parachains_assigner_parachains::Config for Runtime {} + +impl parachains_assigner::Config for Runtime { + type OnDemandAssignmentProvider = OnDemandAssignmentProvider; + type ParachainsAssignmentProvider = ParachainsAssignmentProvider; } impl parachains_initializer::Config for Runtime { @@ -1457,6 +1478,9 @@ construct_runtime! 
{ ParasDisputes: parachains_disputes::{Pallet, Call, Storage, Event} = 62, ParasSlashing: parachains_slashing::{Pallet, Call, Storage, ValidateUnsigned} = 63, MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 64, + ParaAssignmentProvider: parachains_assigner::{Pallet, Storage} = 65, + OnDemandAssignmentProvider: parachains_assigner_on_demand::{Pallet, Call, Storage, Event} = 66, + ParachainsAssignmentProvider: parachains_assigner_parachains::{Pallet} = 67, // Parachain Onboarding Pallets. Start indices at 70 to leave room. Registrar: paras_registrar::{Pallet, Call, Storage, Event, Config} = 70, @@ -1530,6 +1554,7 @@ pub mod migrations { pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, parachains_scheduler::migration::v1::MigrateToV1, + parachains_configuration::migration::v7::MigrateToV7, ); } @@ -1588,6 +1613,7 @@ mod benches { [runtime_parachains::initializer, Initializer] [runtime_parachains::paras_inherent, ParaInherent] [runtime_parachains::paras, Paras] + [runtime_parachains::assigner_on_demand, OnDemandAssignmentProvider] // Substrate [pallet_balances, Balances] [pallet_balances, NisCounterpartBalances] diff --git a/runtime/rococo/src/weights/mod.rs b/runtime/rococo/src/weights/mod.rs index 5bc39330e28e..3264cdb3a3c7 100644 --- a/runtime/rococo/src/weights/mod.rs +++ b/runtime/rococo/src/weights/mod.rs @@ -47,6 +47,7 @@ pub mod runtime_common_claims; pub mod runtime_common_crowdloan; pub mod runtime_common_paras_registrar; pub mod runtime_common_slots; +pub mod runtime_parachains_assigner_on_demand; pub mod runtime_parachains_configuration; pub mod runtime_parachains_disputes; pub mod runtime_parachains_hrmp; diff --git a/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs b/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs new file mode 100644 index 000000000000..6e5b45fcf5b2 --- /dev/null +++ 
b/runtime/rococo/src/weights/runtime_parachains_assigner_on_demand.rs @@ -0,0 +1,71 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `runtime_parachains::assigner_on_demand` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-04, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=runtime_parachains::assigner_on_demand +// --chain=rococo-dev +// --header=./file_header.txt +// --output=./runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `runtime_parachains::assigner_on_demand`. 
+pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> runtime_parachains::assigner_on_demand::WeightInfo for WeightInfo<T> { + /// Storage: `OnDemandAssignmentProvider::SpotTraffic` (r:1 w:0) + /// Proof: `OnDemandAssignmentProvider::SpotTraffic` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::OnDemandQueue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::OnDemandQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[1, 10000]`. + fn place_order(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `293 + s * (4 ±0)` + // Estimated: `3756 + s * (4 ±0)` + // Minimum execution time: 32_958_000 picoseconds. + Weight::from_parts(34_344_493, 0) + .saturating_add(Weight::from_parts(0, 3756)) + // Standard Error: 127 + .saturating_add(Weight::from_parts(13_926, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) + } +} diff --git a/runtime/rococo/src/weights/runtime_parachains_configuration.rs b/runtime/rococo/src/weights/runtime_parachains_configuration.rs index c44046382d5a..29f387657786 100644 --- a/runtime/rococo/src/weights/runtime_parachains_configuration.rs +++ b/runtime/rococo/src/weights/runtime_parachains_configuration.rs @@ -17,24 +17,25 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=rococo-dev // --steps=50 // --repeat=20 -// --pallet=runtime_parachains::configuration // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=runtime_parachains::configuration +// --chain=rococo-dev // --header=./file_header.txt -// --output=./runtime/rococo/src/weights/runtime_parachains_configuration.rs +// --output=./runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,63 +48,56 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::configuration`. 
pub struct WeightInfo(PhantomData); impl runtime_parachains::configuration::WeightInfo for WeightInfo { - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_block_number() -> Weight { // Proof Size summary in bytes: - // Measured: `414` - // Estimated: `1899` - // Minimum execution time: 13_097_000 picoseconds. - Weight::from_parts(13_667_000, 0) - .saturating_add(Weight::from_parts(0, 1899)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_051_000 picoseconds. 
+ Weight::from_parts(9_496_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `414` - // Estimated: `1899` - // Minimum execution time: 13_199_000 picoseconds. - Weight::from_parts(13_400_000, 0) - .saturating_add(Weight::from_parts(0, 1899)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_104_000 picoseconds. 
+ Weight::from_parts(9_403_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_option_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `397` - // Estimated: `1882` - // Minimum execution time: 12_831_000 picoseconds. - Weight::from_parts(13_151_000, 0) - .saturating_add(Weight::from_parts(0, 1882)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_112_000 picoseconds. 
+ Weight::from_parts(9_495_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_hrmp_open_request_ttl() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -112,40 +106,52 @@ impl runtime_parachains::configuration::WeightInfo for Weight::from_parts(2_000_000_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `414` - // Estimated: `1899` - // Minimum 
execution time: 13_059_000 picoseconds. - Weight::from_parts(13_481_000, 0) - .saturating_add(Weight::from_parts(0, 1899)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_011_000 picoseconds. + Weight::from_parts(9_460_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_executor_params() -> Weight { // Proof Size summary in bytes: - // Measured: `414` - // Estimated: `1899` - // Minimum execution time: 13_764_000 picoseconds. 
- Weight::from_parts(14_224_000, 0) - .saturating_add(Weight::from_parts(0, 1899)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_940_000 picoseconds. + Weight::from_parts(10_288_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_perbill() -> Weight { + // Proof Size summary in bytes: + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_192_000 picoseconds. 
+ Weight::from_parts(9_595_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index b762870edb4f..3d21e6da0191 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -25,12 +25,13 @@ use parity_scale_codec::Encode; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; use polkadot_runtime_parachains::{ + assigner_parachains as parachains_assigner_parachains, configuration as parachains_configuration, disputes as parachains_disputes, disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras, paras_inherent as parachains_paras_inherent, runtime_api_impl::v5 as runtime_impl, - scheduler as parachains_scheduler, scheduler_polkadot, session_info as parachains_session_info, + scheduler as parachains_scheduler, session_info as parachains_session_info, shared as parachains_shared, }; @@ -555,10 +556,10 @@ impl parachains_hrmp::Config for Runtime { type WeightInfo = parachains_hrmp::TestWeightInfo; } -impl polkadot_runtime_parachains::scheduler_parachains::Config for Runtime {} -impl crate::scheduler_polkadot::Config for Runtime {} +impl parachains_assigner_parachains::Config for Runtime {} + impl parachains_scheduler::Config for Runtime { - type AssignmentProvider = crate::scheduler_polkadot::Pallet; + type AssignmentProvider = ParaAssignmentProvider; } impl paras_sudo_wrapper::Config for Runtime {} @@ -693,7 +694,6 @@ construct_runtime! 
{ Paras: parachains_paras::{Pallet, Call, Storage, Event, ValidateUnsigned}, ParasShared: parachains_shared::{Pallet, Call, Storage}, Scheduler: parachains_scheduler::{Pallet, Storage}, - SchedulerPolkadot: scheduler_polkadot::{Pallet, Storage}, ParasSudoWrapper: paras_sudo_wrapper::{Pallet, Call}, ParasOrigin: parachains_origin::{Pallet, Origin}, ParaSessionInfo: parachains_session_info::{Pallet, Storage}, @@ -702,6 +702,7 @@ construct_runtime! { Xcm: pallet_xcm::{Pallet, Call, Event, Origin}, ParasDisputes: parachains_disputes::{Pallet, Storage, Event}, ParasSlashing: parachains_slashing::{Pallet, Call, Storage, ValidateUnsigned}, + ParaAssignmentProvider: parachains_assigner_parachains::{Pallet}, Sudo: pallet_sudo::{Pallet, Call, Storage, Config, Event}, diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index b1b117fd5d0b..c2a2930e7da6 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -52,6 +52,7 @@ use runtime_common::{ BlockHashCount, BlockLength, CurrencyToVote, SlowAdjustingFeeUpdate, U256ToBalance, }; use runtime_parachains::{ + assigner_parachains as parachains_assigner_parachains, configuration as parachains_configuration, disputes as parachains_disputes, disputes::slashing as parachains_slashing, dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, @@ -994,11 +995,12 @@ impl parachains_paras_inherent::Config for Runtime { type WeightInfo = weights::runtime_parachains_paras_inherent::WeightInfo; } -impl runtime_parachains::scheduler_parachains::Config for Runtime {} impl parachains_scheduler::Config for Runtime { - type AssignmentProvider = runtime_parachains::scheduler_parachains::Pallet; + type AssignmentProvider = ParaAssignmentProvider; } +impl parachains_assigner_parachains::Config for Runtime {} + impl parachains_initializer::Config for Runtime { type Randomness = pallet_babe::RandomnessFromOneEpochAgo; type ForceOrigin = EnsureRoot; @@ -1227,6 +1229,7 @@ construct_runtime! 
{ ParaSessionInfo: parachains_session_info::{Pallet, Storage} = 52, ParasDisputes: parachains_disputes::{Pallet, Call, Storage, Event} = 53, ParasSlashing: parachains_slashing::{Pallet, Call, Storage, ValidateUnsigned} = 54, + ParaAssignmentProvider: parachains_assigner_parachains::{Pallet, Storage} = 55, // Parachain Onboarding Pallets. Start indices at 60 to leave room. Registrar: paras_registrar::{Pallet, Call, Storage, Event, Config} = 60, @@ -1289,6 +1292,7 @@ pub mod migrations { pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, parachains_scheduler::migration::v1::MigrateToV1, + parachains_configuration::migration::v8::MigrateToV8, ); } diff --git a/runtime/westend/src/weights/runtime_parachains_configuration.rs b/runtime/westend/src/weights/runtime_parachains_configuration.rs index 60f6f8e214c3..585dc9058f21 100644 --- a/runtime/westend/src/weights/runtime_parachains_configuration.rs +++ b/runtime/westend/src/weights/runtime_parachains_configuration.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=westend-dev // --steps=50 // --repeat=20 -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --pallet=runtime_parachains::configuration // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=runtime_parachains::configuration +// --chain=westend-dev // --header=./file_header.txt -// --output=./runtime/westend/src/weights/runtime_parachains_configuration.rs +// --output=./runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,56 +48,56 @@ use core::marker::PhantomData; /// Weight functions for `runtime_parachains::configuration`. pub struct WeightInfo(PhantomData); impl runtime_parachains::configuration::WeightInfo for WeightInfo { - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, 
mode: `Measured`) fn set_config_with_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `127` // Estimated: `1612` - // Minimum execution time: 9_998_000 picoseconds. - Weight::from_parts(10_268_000, 0) + // Minimum execution time: 9_616_000 picoseconds. + Weight::from_parts(9_961_000, 0) .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_u32() -> Weight { // Proof Size summary in bytes: // Measured: `127` // Estimated: `1612` - // Minimum execution time: 9_851_000 picoseconds. - Weight::from_parts(10_102_000, 0) + // Minimum execution time: 9_587_000 picoseconds. 
+ Weight::from_parts(9_964_000, 0) .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_option_u32() -> Weight { // Proof Size summary in bytes: // Measured: `127` // Estimated: `1612` - // Minimum execution time: 9_932_000 picoseconds. - Weight::from_parts(10_248_000, 0) + // Minimum execution time: 9_650_000 picoseconds. 
+ Weight::from_parts(9_960_000, 0) .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_hrmp_open_request_ttl() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -108,34 +106,50 @@ impl runtime_parachains::configuration::WeightInfo for Weight::from_parts(2_000_000_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_balance() -> Weight { // Proof Size summary in bytes: // Measured: `127` // Estimated: `1612` - // Minimum execution time: 9_804_000 picoseconds. - Weight::from_parts(10_173_000, 0) + // Minimum execution time: 9_545_000 picoseconds. 
+ Weight::from_parts(9_845_000, 0) .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Configuration PendingConfigs (r:1 w:1) - /// Proof Skipped: Configuration PendingConfigs (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration BypassConsistencyCheck (r:1 w:0) - /// Proof Skipped: Configuration BypassConsistencyCheck (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParasShared CurrentSessionIndex (r:1 w:0) - /// Proof Skipped: ParasShared CurrentSessionIndex (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_executor_params() -> Weight { // Proof Size summary in bytes: // Measured: `127` // Estimated: `1612` - // Minimum execution time: 10_531_000 picoseconds. - Weight::from_parts(10_984_000, 0) + // Minimum execution time: 10_258_000 picoseconds. 
+ Weight::from_parts(10_607_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_perbill() -> Weight { + // Proof Size summary in bytes: + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 9_502_000 picoseconds. + Weight::from_parts(9_902_000, 0) .saturating_add(Weight::from_parts(0, 1612)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/scripts/ci/gitlab/pipeline/zombienet.yml b/scripts/ci/gitlab/pipeline/zombienet.yml index cc4a7eb2ccc1..5acf9c5321ba 100644 --- a/scripts/ci/gitlab/pipeline/zombienet.yml +++ b/scripts/ci/gitlab/pipeline/zombienet.yml @@ -22,7 +22,7 @@ zombienet-tests-parachains-smoke-test: - export DEBUG=zombie,zombie::network-node - export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG} - export MALUS_IMAGE=${MALUS_IMAGE_NAME}:${MALUS_IMAGE_TAG} - - export COL_IMAGE="docker.io/paritypr/colander:4519" # The collator image is fixed + - export COL_IMAGE="docker.io/paritypr/colander:7292" # The collator image is fixed script: - /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh --github-remote-dir="${GH_DIR}" diff --git a/zombienet_tests/functional/0003-parachains-garbage-candidate.zndsl b/zombienet_tests/functional/0003-parachains-garbage-candidate.zndsl index ccc1ea258f52..50fff9e3d597 100644 --- 
a/zombienet_tests/functional/0003-parachains-garbage-candidate.zndsl +++ b/zombienet_tests/functional/0003-parachains-garbage-candidate.zndsl @@ -9,7 +9,7 @@ honest-validator-2: reports node_roles is 4 malus-validator-0: reports node_roles is 4 # Parachains should be making progress even if we have up to 1/3 malicious validators. -honest-validator-0: parachain 2000 block height is at least 2 within 180 seconds +honest-validator-0: parachain 2000 block height is at least 2 within 240 seconds honest-validator-1: parachain 2001 block height is at least 2 within 180 seconds honest-validator-2: parachain 2002 block height is at least 2 within 180 seconds diff --git a/zombienet_tests/misc/0003-parathreads.toml b/zombienet_tests/misc/0003-parathreads.toml new file mode 100644 index 000000000000..83b6d39bffb0 --- /dev/null +++ b/zombienet_tests/misc/0003-parathreads.toml @@ -0,0 +1,32 @@ +[settings] +timeout = 1000 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +command = "polkadot" + + [[relaychain.nodes]] + name = "alice" + args = [ "--alice", "-lruntime=debug,parachain=trace" ] + + [[relaychain.nodes]] + name = "bob" + args = [ "--bob", "-lruntime=debug,parachain=trace" ] + +[[parachains]] +id = 100 +add_to_genesis = false +register_para = true +onboard_as_parachain = false + + [parachains.collator] + name = "collator01" + image = "{{COL_IMAGE}}" + command = "adder-collator" + args = [ "-lruntime=debug,parachain=trace" ] + +[types.Header] +number = "u64" +parent_hash = "Hash" +post_state = "Hash" diff --git a/zombienet_tests/smoke/0001-parachains-smoke-test.zndsl b/zombienet_tests/smoke/0001-parachains-smoke-test.zndsl index 13d0624158f2..b280a198e085 100644 --- a/zombienet_tests/smoke/0001-parachains-smoke-test.zndsl +++ b/zombienet_tests/smoke/0001-parachains-smoke-test.zndsl @@ -3,4 +3,4 @@ Network: ./0001-parachains-smoke-test.toml Creds: config alice: parachain 100 is registered within 225 seconds -alice: parachain 
100 block height is at least 10 within 200 seconds +alice: parachain 100 block height is at least 10 within 400 seconds diff --git a/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.zndsl b/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.zndsl index fec28455f5f2..bcea5aa1646e 100644 --- a/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.zndsl +++ b/zombienet_tests/smoke/0002-parachains-upgrade-smoke-test.zndsl @@ -3,6 +3,6 @@ Network: ./0002-parachains-upgrade-smoke-test.toml Creds: config alice: parachain 100 is registered within 225 seconds -alice: parachain 100 block height is at least 10 within 400 seconds +alice: parachain 100 block height is at least 10 within 460 seconds alice: parachain 100 perform dummy upgrade within 200 seconds alice: parachain 100 block height is at least 14 within 200 seconds