diff --git a/.github/workflows/domain-genesis-storage-snapshot-build.yml b/.github/workflows/domain-genesis-storage-snapshot-build.yml new file mode 100644 index 0000000000..aacd77b622 --- /dev/null +++ b/.github/workflows/domain-genesis-storage-snapshot-build.yml @@ -0,0 +1,49 @@ +# This action enabling building domain genesis storage used in registering new domain runtime, can be triggered manually or by release creation. +# +# Domain genesis storages are built for releases and for manually triggered runs, uploaded to artifacts and assets. +name: Domain genesis storage snapshot build + +on: + workflow_dispatch: + push: + tags: + - 'domain-genesis-storage-snapshot-*' + - 'domain-genesis-storage-gemini-*' + +jobs: + domain-genesis-storage: + runs-on: ${{ fromJson(github.repository_owner == 'subspace' && '["self-hosted", "ubuntu-20.04-x86-64"]' || 'ubuntu-22.04') }} + permissions: + contents: write + packages: write + + steps: + - name: Build node image + id: build + uses: docker/build-push-action@c56af957549030174b10d6867f20e78cfd7debc5 # @v3.2.0 + with: + file: Dockerfile-node + push: false + + - name: Generate testnet domain genesis storages + run: | + docker run --rm -u root ${{ steps.build.outputs.digest }} domain build-genesis-storage --chain gemini-3h > domain-genesis-storage-gemini-3h + docker run --rm -u root ${{ steps.build.outputs.digest }} domain build-genesis-storage --chain devnet > domain-genesis-storage-devnet + + - name: Upload domain genesis storages to artifacts + uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # @v3.1.1 + with: + name: domain-genesis-storage + path: | + domain-genesis-storage-gemini-3h + domain-genesis-storage-devnet + if-no-files-found: error + + - name: Upload domain genesis storages to assets + uses: alexellis/upload-assets@259de5111cb56966d046ced998941e93f91d2c93 # @0.4.0 + env: + GITHUB_TOKEN: ${{ github.token }} + with: + asset_paths: '["domain-genesis-storage-gemini-3h", 
"domain-genesis-storage-devnet"]' + # Only run for releases + if: github.event_name == 'push' && github.ref_type == 'tag' diff --git a/.github/workflows/snapshot-build.yml b/.github/workflows/snapshot-build.yml index 95c9d5e638..a22152eb1e 100644 --- a/.github/workflows/snapshot-build.yml +++ b/.github/workflows/snapshot-build.yml @@ -86,7 +86,7 @@ jobs: with: token: ${{ secrets.GITHUB_TOKEN }} event-type: trivy-scan-dispatch - client-payload: '{"sha": "${{ github.sha }}"}' + client-payload: '{"image": "ghcr.io/${{ github.repository_owner }}/${{ matrix.image }}:sha-${{ github.sha }}"}' executables: strategy: diff --git a/.github/workflows/trivy-security-scan.yml b/.github/workflows/trivy-security-scan.yml index e96accedfb..2f006a671e 100644 --- a/.github/workflows/trivy-security-scan.yml +++ b/.github/workflows/trivy-security-scan.yml @@ -17,7 +17,7 @@ jobs: steps: - name: Check Docker image availability with retry run: | - image="ghcr.io/${{ github.repository_owner }}/${{ github.event.client_payload.image }}:${{ github.event.client_payload.sha }}" + image="${{ github.event.client_payload.image }}" timeout=900 # Timeout in seconds (15 minutes) interval=300 # Interval between retries in seconds (5 minutes) retry_limit=5 # Number of retries @@ -43,12 +43,6 @@ jobs: needs: wait-for-image if: needs.wait-for-image.outputs.image-available == 'true' runs-on: ubuntu-latest - strategy: - matrix: - image: - - farmer - - node - - bootstrap-node steps: - name: Checkout code @@ -57,7 +51,7 @@ jobs: - name: Run Trivy vulnerability scanner on image uses: aquasecurity/trivy-action@d43c1f16c00cfd3978dde6c07f4bbcf9eb6993ca # @v0.16.1 with: - image-ref: ghcr.io/${{ github.repository_owner }}/${{ matrix.image }}:sha-${{ github.event.client_payload.sha }} + image-ref: ${{ github.event.client_payload.image }} format: "sarif" output: "trivy-results.sarif" exit-code: "1" diff --git a/Cargo.lock b/Cargo.lock index 3494f44e65..aff108ccce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1320,11 
+1320,11 @@ dependencies = [ [[package]] name = "blst" version = "0.3.11" -source = "git+https://github.com/supranational/blst.git#0d46eefa45fc1e57aceb42bba0e84eab3a7a9725" +source = "git+https://github.com/subspace/blst?rev=ab042e18cb3b62e131423380513964e4b2c7b445#ab042e18cb3b62e131423380513964e4b2c7b445" dependencies = [ "cc", "glob", - "threadpool", + "rayon", "zeroize", ] @@ -2648,10 +2648,12 @@ dependencies = [ name = "domain-runtime-primitives" version = "0.1.0" dependencies = [ + "fp-account", "frame-support", "frame-system", "parity-scale-codec", "scale-info", + "serde", "sp-api", "sp-core", "sp-runtime", @@ -7145,6 +7147,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-block-fees", + "sp-domains", "sp-runtime", "sp-std", ] @@ -7172,6 +7175,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "hex-literal", "log", "pallet-balances", "pallet-block-fees", @@ -7590,6 +7594,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-core", + "sp-domains", "sp-io", "sp-messenger", "sp-runtime", @@ -12054,6 +12059,7 @@ dependencies = [ name = "subspace-test-client" version = "0.1.0" dependencies = [ + "domain-runtime-primitives", "evm-domain-test-runtime", "fp-evm", "futures", diff --git a/Cargo.toml b/Cargo.toml index 37eed29b02..39a43f7fd9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -138,3 +138,8 @@ sp-inherents = { version = "4.0.0-dev", git = "https://github.com/subspace/polka sp-io = { version = "23.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } sp-runtime = { version = "24.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } substrate-prometheus-endpoint = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } + +# TODO: Importing https://github.com/supranational/blst/pull/203 to take advantage of optimizations introduced there, +# 
switch to upstream once merged or once similar performance improvements land upstream +[patch."https://github.com/supranational/blst.git"] +blst = { git = "https://github.com/subspace/blst", rev = "ab042e18cb3b62e131423380513964e4b2c7b445" } diff --git a/crates/pallet-domains/Cargo.toml b/crates/pallet-domains/Cargo.toml index 4c9a00c409..0115dc6312 100644 --- a/crates/pallet-domains/Cargo.toml +++ b/crates/pallet-domains/Cargo.toml @@ -18,6 +18,7 @@ frame-benchmarking = { version = "4.0.0-dev", default-features = false, git = "h frame-support = { version = "4.0.0-dev", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } frame-system = { version = "4.0.0-dev", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } log = { version = "0.4.20", default-features = false } +pallet-balances = { version = "4.0.0-dev", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } scale-info = { version = "2.7.0", default-features = false, features = ["derive"] } sp-consensus-slots = { version = "0.10.0-dev", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } sp-consensus-subspace = { version = "0.1.0", default-features = false, path = "../sp-consensus-subspace" } @@ -33,7 +34,7 @@ subspace-runtime-primitives = { version = "0.1.0", default-features = false, pat [dev-dependencies] domain-pallet-executive = { version = "0.1.0", default-features = false, path = "../../domains/pallets/executive" } -pallet-balances = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } +hex-literal = "0.4.1" pallet-timestamp = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = 
"d6b500960579d73c43fc4ef550b703acfa61c4c8" } pallet-block-fees = { version = "0.1.0", default-features = false, path = "../../domains/pallets/block-fees" } sp-externalities = { version = "0.19.0", git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } @@ -49,6 +50,7 @@ std = [ "frame-support/std", "frame-system/std", "log/std", + "pallet-balances/std", "scale-info/std", "sp-consensus-slots/std", "sp-consensus-subspace/std", diff --git a/crates/pallet-domains/src/benchmarking.rs b/crates/pallet-domains/src/benchmarking.rs index 8346dbe844..251370dcbd 100644 --- a/crates/pallet-domains/src/benchmarking.rs +++ b/crates/pallet-domains/src/benchmarking.rs @@ -225,6 +225,7 @@ mod benchmarks { bundle_slot_probability: (1, 1), target_bundles_per_block: 10, operator_allow_list: OperatorAllowList::Anyone, + initial_balances: Default::default(), }; #[extrinsic_call] @@ -434,6 +435,7 @@ mod benchmarks { bundle_slot_probability: (1, 1), target_bundles_per_block: 10, operator_allow_list: OperatorAllowList::Anyone, + initial_balances: Default::default(), }; assert_ok!(Domains::::instantiate_domain( diff --git a/crates/pallet-domains/src/block_tree.rs b/crates/pallet-domains/src/block_tree.rs index c87f335e8c..cf3ce89977 100644 --- a/crates/pallet-domains/src/block_tree.rs +++ b/crates/pallet-domains/src/block_tree.rs @@ -10,7 +10,10 @@ use frame_support::{ensure, PalletError}; use scale_info::TypeInfo; use sp_core::Get; use sp_domains::merkle_tree::MerkleTree; -use sp_domains::{ConfirmedDomainBlock, DomainId, ExecutionReceipt, OperatorId}; +use sp_domains::{ + ChainId, ConfirmedDomainBlock, DomainId, DomainsTransfersTracker, ExecutionReceipt, OperatorId, + Transfers, +}; use sp_runtime::traits::{BlockNumberProvider, CheckedSub, One, Saturating, Zero}; use sp_std::cmp::Ordering; use sp_std::collections::btree_map::BTreeMap; @@ -34,6 +37,9 @@ pub enum Error { InvalidExecutionTrace, UnavailableConsensusBlockHash, InvalidStateRoot, + 
BalanceOverflow, + DomainTransfersTracking, + InvalidDomainTransfers, } #[derive(TypeInfo, Debug, Encode, Decode, Clone, PartialEq, Eq)] @@ -344,6 +350,21 @@ pub(crate) fn process_execution_receipt( execution_receipt.consensus_block_number, ); + let block_fees = execution_receipt + .block_fees + .total_fees() + .ok_or(Error::BalanceOverflow)?; + + ensure!( + execution_receipt + .transfers + .is_valid(ChainId::Domain(domain_id)), + Error::InvalidDomainTransfers + ); + + update_domain_transfers::(domain_id, &execution_receipt.transfers, block_fees) + .map_err(|_| Error::DomainTransfersTracking)?; + LatestConfirmedDomainBlock::::insert( domain_id, ConfirmedDomainBlock { @@ -380,6 +401,59 @@ pub(crate) fn process_execution_receipt( Ok(None) } +type TransferTrackerError = + <::DomainsTransfersTracker as DomainsTransfersTracker>>::Error; + +/// Updates domain transfers for following scenarios +/// 1. Block fees are burned on domain +/// 2. Confirming incoming XDM transfers to the Domain +/// 3. Noting outgoing transfers from the domain +/// 4. Cancelling outgoing transfers from the domain. 
+fn update_domain_transfers( + domain_id: DomainId, + transfers: &Transfers>, + block_fees: BalanceOf, +) -> Result<(), TransferTrackerError> { + let Transfers { + transfers_in, + transfers_out, + transfers_rejected, + rejected_transfers_claimed, + } = transfers; + + // confirm incoming transfers + let er_chain_id = ChainId::Domain(domain_id); + transfers_in + .iter() + .try_for_each(|(from_chain_id, amount)| { + T::DomainsTransfersTracker::confirm_transfer(*from_chain_id, er_chain_id, *amount) + })?; + + // note outgoing transfers + transfers_out.iter().try_for_each(|(to_chain_id, amount)| { + T::DomainsTransfersTracker::note_transfer(er_chain_id, *to_chain_id, *amount) + })?; + + // note rejected transfers + transfers_rejected + .iter() + .try_for_each(|(from_chain_id, amount)| { + T::DomainsTransfersTracker::reject_transfer(*from_chain_id, er_chain_id, *amount) + })?; + + // claim rejected transfers + rejected_transfers_claimed + .iter() + .try_for_each(|(to_chain_id, amount)| { + T::DomainsTransfersTracker::claim_rejected_transfer(er_chain_id, *to_chain_id, *amount) + })?; + + // deduct execution fees from domain + T::DomainsTransfersTracker::reduce_domain_balance(domain_id, block_fees)?; + + Ok(()) +} + fn add_new_receipt_to_block_tree( domain_id: DomainId, submitter: OperatorId, @@ -687,7 +761,7 @@ mod tests { H256::random(), stale_receipt, ); - assert!(crate::Pallet::::submit_bundle(RawOrigin::None.into(), bundle,).is_err()); + assert!(crate::Pallet::::submit_bundle(RawOrigin::None.into(), bundle).is_err()); assert_eq!( BlockTreeNodes::::get(stale_receipt_hash) @@ -735,7 +809,7 @@ mod tests { H256::random(), previous_head_receipt, ); - assert!(crate::Pallet::::submit_bundle(RawOrigin::None.into(), bundle,).is_err()); + assert!(crate::Pallet::::submit_bundle(RawOrigin::None.into(), bundle).is_err()); }); } diff --git a/crates/pallet-domains/src/domain_registry.rs b/crates/pallet-domains/src/domain_registry.rs index 1469002936..ec57073b28 100644 --- 
a/crates/pallet-domains/src/domain_registry.rs +++ b/crates/pallet-domains/src/domain_registry.rs @@ -5,26 +5,28 @@ use crate::pallet::{DomainStakingSummary, NextEVMChainId}; use crate::runtime_registry::DomainRuntimeInfo; use crate::staking::StakingSummary; use crate::{ - Config, DomainHashingFor, DomainRegistry, ExecutionReceiptOf, HoldIdentifier, NextDomainId, - RuntimeRegistry, + BalanceOf, Config, DomainHashingFor, DomainRegistry, ExecutionReceiptOf, HoldIdentifier, + NextDomainId, RuntimeRegistry, }; use alloc::string::String; use codec::{Decode, Encode}; -use frame_support::traits::fungible::{Inspect, MutateHold}; -use frame_support::traits::tokens::{Fortitude, Preservation}; +use domain_runtime_primitives::MultiAccountId; +use frame_support::traits::fungible::{Inspect, Mutate, MutateHold}; +use frame_support::traits::tokens::{Fortitude, Precision, Preservation}; use frame_support::weights::Weight; use frame_support::{ensure, PalletError}; use frame_system::pallet_prelude::*; use scale_info::TypeInfo; use sp_core::Get; use sp_domains::{ - derive_domain_block_hash, DomainId, DomainsDigestItem, OperatorAllowList, RuntimeId, - RuntimeType, + derive_domain_block_hash, DomainId, DomainsDigestItem, DomainsTransfersTracker, + OperatorAllowList, RuntimeId, RuntimeType, }; use sp_runtime::traits::{CheckedAdd, Zero}; use sp_runtime::DigestItem; use sp_std::collections::btree_map::BTreeMap; use sp_std::collections::btree_set::BTreeSet; +use sp_std::vec::Vec; /// Domain registry specific errors #[derive(TypeInfo, Encode, Decode, PalletError, Debug, PartialEq)] @@ -42,10 +44,16 @@ pub enum Error { FailedToGenerateGenesisStateRoot, DomainNotFound, NotDomainOwner, + InitialBalanceOverflow, + TransfersTracker, + MinInitialAccountBalance, + MaxInitialDomainAccounts, + DuplicateInitialAccounts, + FailedToGenerateRawGenesis(crate::runtime_registry::Error), } #[derive(TypeInfo, Debug, Encode, Decode, Clone, PartialEq, Eq)] -pub struct DomainConfig { +pub struct DomainConfig { 
/// A user defined name for this domain, should be a human-readable UTF-8 encoded string. pub domain_name: String, /// A pointer to the `RuntimeRegistry` entry for this domain. @@ -61,10 +69,57 @@ pub struct DomainConfig { pub target_bundles_per_block: u32, /// Allowed operators to operate for this domain. pub operator_allow_list: OperatorAllowList, + // Initial balances for Domain. + pub initial_balances: Vec<(MultiAccountId, Balance)>, +} + +impl DomainConfig +where + AccountId: Ord, + Balance: Zero + CheckedAdd + PartialOrd, +{ + pub(crate) fn total_issuance(&self) -> Option { + self.initial_balances + .iter() + .try_fold(Balance::zero(), |total, (_, balance)| { + total.checked_add(balance) + }) + } + + pub(crate) fn check_initial_balances(&self) -> Result<(), Error> + where + Balance: From>, + { + let accounts: BTreeSet = self + .initial_balances + .iter() + .map(|(acc, _)| acc) + .cloned() + .collect(); + + ensure!( + accounts.len() == self.initial_balances.len(), + Error::DuplicateInitialAccounts + ); + + ensure!( + self.initial_balances.len() as u32 <= T::MaxInitialDomainAccounts::get(), + Error::MaxInitialDomainAccounts + ); + + for (_, balance) in &self.initial_balances { + ensure!( + *balance >= T::MinInitialDomainAccountBalance::get().into(), + Error::MinInitialAccountBalance + ); + } + + Ok(()) + } } #[derive(TypeInfo, Debug, Encode, Decode, Clone, PartialEq, Eq)] -pub struct DomainObject { +pub struct DomainObject { /// The address of the domain creator, used to validate updating the domain config. pub owner_account_id: AccountId, /// The consensus chain block number when the domain first instantiated. @@ -72,14 +127,14 @@ pub struct DomainObject { /// The hash of the genesis execution receipt for this domain. pub genesis_receipt_hash: ReceiptHash, /// The domain config. - pub domain_config: DomainConfig, + pub domain_config: DomainConfig, /// Domain runtime specific information. 
pub domain_runtime_info: DomainRuntimeInfo, } pub(crate) fn can_instantiate_domain( owner_account_id: &T::AccountId, - domain_config: &DomainConfig, + domain_config: &DomainConfig>, ) -> Result<(), Error> { ensure!( domain_config.domain_name.len() as u32 <= T::MaxDomainNameLength::get(), @@ -116,11 +171,13 @@ pub(crate) fn can_instantiate_domain( Error::InsufficientFund ); + domain_config.check_initial_balances::()?; + Ok(()) } pub(crate) fn do_instantiate_domain( - domain_config: DomainConfig, + domain_config: DomainConfig>, owner_account_id: T::AccountId, created_at: BlockNumberFor, ) -> Result { @@ -141,9 +198,32 @@ pub(crate) fn do_instantiate_domain( } }; + // burn total issuance on domain from owners account and track the domain balance + let total_issuance = domain_config + .total_issuance() + .ok_or(Error::InitialBalanceOverflow)?; + + T::Currency::burn_from( + &owner_account_id, + total_issuance, + Precision::Exact, + Fortitude::Polite, + ) + .map_err(|_| Error::InsufficientFund)?; + + T::DomainsTransfersTracker::initialize_domain_balance(domain_id, total_issuance) + .map_err(|_| Error::TransfersTracker)?; + let genesis_receipt = { let state_version = runtime_obj.version.state_version(); - let raw_genesis = runtime_obj.into_complete_raw_genesis(domain_id, domain_runtime_info); + let raw_genesis = runtime_obj + .into_complete_raw_genesis::( + domain_id, + domain_runtime_info, + total_issuance, + domain_config.initial_balances.clone(), + ) + .map_err(Error::FailedToGenerateRawGenesis)?; let state_root = raw_genesis.state_root::>(state_version); let genesis_block_hash = derive_domain_block_hash::( Zero::zero(), @@ -222,12 +302,16 @@ mod tests { use crate::pallet::{DomainRegistry, NextDomainId, RuntimeRegistry}; use crate::runtime_registry::RuntimeObject; use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; + use domain_runtime_primitives::{AccountId20, AccountId20Converter}; use frame_support::traits::Currency; + use 
frame_support::{assert_err, assert_ok}; + use hex_literal::hex; use sp_domains::storage::RawGenesis; + use sp_runtime::traits::Convert; use sp_std::collections::btree_set::BTreeSet; use sp_std::vec; use sp_version::RuntimeVersion; + use subspace_runtime_primitives::SSC; type Balances = pallet_balances::Pallet; @@ -244,6 +328,7 @@ mod tests { bundle_slot_probability: (0, 0), target_bundles_per_block: 0, operator_allow_list: OperatorAllowList::Anyone, + initial_balances: Default::default(), }; let mut ext = new_test_ext(); @@ -381,4 +466,167 @@ mod tests { ); }); } + + #[test] + fn test_domain_instantiation_evm_accounts() { + let creator = 1u128; + let created_at = 0u64; + // Construct an invalid domain config initially + let mut domain_config = DomainConfig { + domain_name: "evm-domain".to_owned(), + runtime_id: 0, + max_block_size: 10, + max_block_weight: Weight::from_parts(1, 0), + bundle_slot_probability: (1, 1), + target_bundles_per_block: 1, + operator_allow_list: OperatorAllowList::Anyone, + initial_balances: vec![(MultiAccountId::Raw(vec![0, 1, 2, 3, 4, 5]), 1_000_000 * SSC)], + }; + + let mut ext = new_test_ext(); + ext.execute_with(|| { + assert_eq!(NextDomainId::::get(), 0.into()); + // Register runtime id + RuntimeRegistry::::insert( + domain_config.runtime_id, + RuntimeObject { + runtime_name: "evm".to_owned(), + runtime_type: Default::default(), + runtime_upgrades: 0, + hash: Default::default(), + raw_genesis: RawGenesis::dummy(vec![1, 2, 3, 4]), + version: RuntimeVersion { + spec_name: "test".into(), + spec_version: 1, + impl_version: 1, + transaction_version: 1, + ..Default::default() + }, + created_at: Default::default(), + updated_at: Default::default(), + }, + ); + + // Set enough fund to creator + Balances::make_free_balance_be( + &creator, + ::DomainInstantiationDeposit::get() + // for domain total issuance + + 1_000_000 * SSC + + ::ExistentialDeposit::get(), + ); + + // should fail due to invalid account ID type + assert_err!( + 
do_instantiate_domain::(domain_config.clone(), creator, created_at), + Error::FailedToGenerateRawGenesis( + crate::runtime_registry::Error::InvalidAccountIdType + ) + ); + + // duplicate accounts + domain_config.initial_balances = vec![ + ( + AccountId20Converter::convert(AccountId20::from(hex!( + "f24FF3a9CF04c71Dbc94D0b566f7A27B94566cac" + ))), + 1_000_000 * SSC, + ), + ( + AccountId20Converter::convert(AccountId20::from(hex!( + "f24FF3a9CF04c71Dbc94D0b566f7A27B94566cac" + ))), + 1_000_000 * SSC, + ), + ]; + + assert_err!( + do_instantiate_domain::(domain_config.clone(), creator, created_at), + Error::DuplicateInitialAccounts + ); + + // max accounts + domain_config.initial_balances = vec![ + ( + AccountId20Converter::convert(AccountId20::from(hex!( + "f24FF3a9CF04c71Dbc94D0b566f7A27B94566cac" + ))), + 1_000_000 * SSC, + ), + ( + AccountId20Converter::convert(AccountId20::from(hex!( + "f24FF3a9CF04c71Dbc94D0b566f7A27B94566cbc" + ))), + 1_000_000 * SSC, + ), + ( + AccountId20Converter::convert(AccountId20::from(hex!( + "f24FF3a9CF04c71Dbc94D0b566f7A27B94566ccc" + ))), + 1_000_000 * SSC, + ), + ( + AccountId20Converter::convert(AccountId20::from(hex!( + "f24FF3a9CF04c71Dbc94D0b566f7A27B94566cdc" + ))), + 1_000_000 * SSC, + ), + ( + AccountId20Converter::convert(AccountId20::from(hex!( + "f24FF3a9CF04c71Dbc94D0b566f7A27B94566cec" + ))), + 1_000_000 * SSC, + ), + ( + AccountId20Converter::convert(AccountId20::from(hex!( + "f24FF3a9CF04c71Dbc94D0b566f7A27B94566cfc" + ))), + 1_000_000 * SSC, + ), + ]; + + assert_err!( + do_instantiate_domain::(domain_config.clone(), creator, created_at), + Error::MaxInitialDomainAccounts + ); + + // min balance accounts + domain_config.initial_balances = vec![( + AccountId20Converter::convert(AccountId20::from(hex!( + "f24FF3a9CF04c71Dbc94D0b566f7A27B94566cac" + ))), + 1, + )]; + + assert_err!( + do_instantiate_domain::(domain_config.clone(), creator, created_at), + Error::MinInitialAccountBalance + ); + + 
domain_config.initial_balances = vec![( + AccountId20Converter::convert(AccountId20::from(hex!( + "f24FF3a9CF04c71Dbc94D0b566f7A27B94566cac" + ))), + 1_000_000 * SSC, + )]; + + // Set enough fund to creator + Balances::make_free_balance_be( + &creator, + ::DomainInstantiationDeposit::get() + // for domain total issuance + + 1_000_000 * SSC + + ::ExistentialDeposit::get(), + ); + + // should be successful + let domain_id = + do_instantiate_domain::(domain_config.clone(), creator, created_at).unwrap(); + let domain_obj = DomainRegistry::::get(domain_id).unwrap(); + + assert_eq!(domain_obj.owner_account_id, creator); + assert_eq!(domain_obj.created_at, created_at); + assert_eq!(domain_obj.domain_config, domain_config); + }); + } } diff --git a/crates/pallet-domains/src/lib.rs b/crates/pallet-domains/src/lib.rs index 5d22d770d7..65bd4b0a64 100644 --- a/crates/pallet-domains/src/lib.rs +++ b/crates/pallet-domains/src/lib.rs @@ -60,7 +60,7 @@ use sp_domains_fraud_proof::verification::{ verify_bundle_equivocation_fraud_proof, verify_invalid_block_fees_fraud_proof, verify_invalid_bundles_fraud_proof, verify_invalid_domain_block_hash_fraud_proof, verify_invalid_domain_extrinsics_root_fraud_proof, verify_invalid_state_transition_fraud_proof, - verify_valid_bundle_fraud_proof, + verify_invalid_transfers_fraud_proof, verify_valid_bundle_fraud_proof, }; use sp_runtime::traits::{Hash, Header, One, Zero}; use sp_runtime::{RuntimeAppPublic, SaturatedConversion, Saturating}; @@ -166,8 +166,8 @@ mod pallet { use sp_core::H256; use sp_domains::bundle_producer_election::ProofOfElectionError; use sp_domains::{ - BundleDigest, ConfirmedDomainBlock, DomainId, EpochIndex, GenesisDomain, OperatorAllowList, - OperatorId, OperatorPublicKey, RuntimeId, RuntimeType, + BundleDigest, ConfirmedDomainBlock, DomainId, DomainsTransfersTracker, EpochIndex, + GenesisDomain, OperatorAllowList, OperatorId, OperatorPublicKey, RuntimeId, RuntimeType, }; use sp_domains_fraud_proof::fraud_proof::FraudProof; 
use sp_domains_fraud_proof::InvalidTransactionCode; @@ -175,7 +175,7 @@ mod pallet { AtLeast32BitUnsigned, BlockNumberProvider, CheckEqual, CheckedAdd, Header as HeaderT, MaybeDisplay, One, SimpleBitOps, Zero, }; - use sp_runtime::{SaturatedConversion, Saturating}; + use sp_runtime::Saturating; use sp_std::boxed::Box; use sp_std::collections::btree_map::BTreeMap; use sp_std::collections::btree_set::BTreeSet; @@ -321,6 +321,15 @@ mod pallet { /// The block slot type BlockSlot: BlockSlot; + + /// Transfers tracker. + type DomainsTransfersTracker: DomainsTransfersTracker>; + + /// Upper limit for total initial accounts domains + type MaxInitialDomainAccounts: Get; + + /// Minimum balance for each initial domain account + type MinInitialDomainAccountBalance: Get>; } #[pallet::pallet] @@ -468,7 +477,7 @@ mod pallet { _, Identity, DomainId, - DomainObject, ReceiptHashFor, T::AccountId>, + DomainObject, ReceiptHashFor, T::AccountId, BalanceOf>, OptionQuery, >; @@ -619,6 +628,8 @@ mod pallet { DescendantsOfFraudulentERNotPruned, /// Invalid fraud proof since block fees are not mismatched. InvalidBlockFeesFraudProof, + /// Invalid fraud proof since transfers are not mismatched. + InvalidTransfersFraudProof, /// Invalid domain block hash fraud proof. 
InvalidDomainBlockHashFraudProof, /// Invalid domain extrinsic fraud proof @@ -970,7 +981,7 @@ mod pallet { #[pallet::call_index(1)] // TODO: proper weight - #[pallet::weight((Weight::from_all(10_000), Pays::No))] + #[pallet::weight((Weight::from_all(10_000), DispatchClass::Operational, Pays::No))] pub fn submit_fraud_proof( origin: OriginFor, fraud_proof: Box, T::Hash, T::DomainHeader>>, @@ -1156,7 +1167,7 @@ mod pallet { #[pallet::weight(T::WeightInfo::instantiate_domain())] pub fn instantiate_domain( origin: OriginFor, - domain_config: DomainConfig, + domain_config: DomainConfig>, ) -> DispatchResult { ensure_root(origin)?; @@ -1297,7 +1308,7 @@ mod pallet { #[pallet::genesis_config] pub struct GenesisConfig { - pub genesis_domain: Option>, + pub genesis_domain: Option>>, } impl Default for GenesisConfig { @@ -1331,6 +1342,7 @@ mod pallet { bundle_slot_probability: genesis_domain.bundle_slot_probability, target_bundles_per_block: genesis_domain.target_bundles_per_block, operator_allow_list: genesis_domain.operator_allow_list, + initial_balances: genesis_domain.initial_balances, }; let domain_owner = genesis_domain.owner_account_id; let domain_id = @@ -1340,9 +1352,7 @@ mod pallet { // Register domain_owner as the genesis operator. 
let operator_config = OperatorConfig { signing_key: genesis_domain.signing_key.clone(), - minimum_nominator_stake: genesis_domain - .minimum_nominator_stake - .saturated_into(), + minimum_nominator_stake: genesis_domain.minimum_nominator_stake, nomination_tax: genesis_domain.nomination_tax, }; let operator_stake = T::MinOperatorStake::get(); @@ -1517,8 +1527,15 @@ impl Pallet { let domain_obj = DomainRegistry::::get(domain_id)?; let runtime_object = RuntimeRegistry::::get(domain_obj.domain_config.runtime_id)?; let runtime_type = runtime_object.runtime_type.clone(); - let raw_genesis = - runtime_object.into_complete_raw_genesis(domain_id, domain_obj.domain_runtime_info); + let total_issuance = domain_obj.domain_config.total_issuance()?; + let raw_genesis = runtime_object + .into_complete_raw_genesis::( + domain_id, + domain_obj.domain_runtime_info, + total_issuance, + domain_obj.domain_config.initial_balances, + ) + .ok()?; Some(( DomainInstanceData { runtime_type, @@ -1703,6 +1720,22 @@ impl Pallet { FraudProofError::InvalidBlockFeesFraudProof })?; } + FraudProof::InvalidTransfers(req) => { + verify_invalid_transfers_fraud_proof::< + T::Block, + DomainBlockNumberFor, + T::DomainHash, + BalanceOf, + DomainHashingFor, + >(bad_receipt, req) + .map_err(|err| { + log::error!( + target: "runtime::domains", + "Domain transfers proof verification failed: {err:?}" + ); + FraudProofError::InvalidTransfersFraudProof + })?; + } FraudProof::InvalidDomainBlockHash(InvalidDomainBlockHashProof { digest_storage_proof, .. diff --git a/crates/pallet-domains/src/runtime_registry.rs b/crates/pallet-domains/src/runtime_registry.rs index 240efbb0f4..8ed96b1ad5 100644 --- a/crates/pallet-domains/src/runtime_registry.rs +++ b/crates/pallet-domains/src/runtime_registry.rs @@ -1,18 +1,20 @@ //! 
Runtime registry for domains use crate::pallet::{NextRuntimeId, RuntimeRegistry, ScheduledRuntimeUpgrades}; -use crate::{Config, Event}; +use crate::{BalanceOf, Config, Event}; use alloc::string::String; use codec::{Decode, Encode}; -use domain_runtime_primitives::EVMChainId; +use domain_runtime_primitives::{AccountId20, EVMChainId, MultiAccountId, TryConvertBack}; use frame_support::PalletError; use frame_system::pallet_prelude::*; +use frame_system::AccountInfo; use scale_info::TypeInfo; use sp_core::Hasher; -use sp_domains::storage::RawGenesis; +use sp_domains::storage::{RawGenesis, StorageData, StorageKey}; use sp_domains::{DomainId, DomainsDigestItem, RuntimeId, RuntimeType}; -use sp_runtime::traits::{CheckedAdd, Get}; +use sp_runtime::traits::{CheckedAdd, Get, Zero}; use sp_runtime::DigestItem; +use sp_std::vec; use sp_std::vec::Vec; use sp_version::RuntimeVersion; @@ -28,6 +30,7 @@ pub enum Error { MaxScheduledBlockNumber, FailedToDecodeRawGenesis, RuntimeCodeNotFoundInRawGenesis, + InvalidAccountIdType, } #[derive(TypeInfo, Debug, Encode, Decode, Clone, PartialEq, Eq)] @@ -56,21 +59,68 @@ impl Default for DomainRuntimeInfo { } } +fn derive_initial_balances_storages( + total_issuance: BalanceOf, + balances: Vec<(AccountId, BalanceOf)>, +) -> Vec<(StorageKey, StorageData)> { + let total_issuance_key = sp_domains::domain_total_issuance_storage_key(); + let mut initial_storages = vec![(total_issuance_key, StorageData(total_issuance.encode()))]; + for (account_id, balance) in balances { + let account_storage_key = sp_domains::domain_account_storage_key(account_id); + let account_info = AccountInfo { + nonce: domain_runtime_primitives::Nonce::zero(), + consumers: 0, + // providers are set to 1 for new accounts + providers: 1, + sufficients: 0, + data: pallet_balances::AccountData { + free: balance, + ..Default::default() + }, + }; + initial_storages.push((account_storage_key, StorageData(account_info.encode()))) + } + + initial_storages +} + impl RuntimeObject { 
// Return a complete raw genesis with runtime code and domain id set properly - pub fn into_complete_raw_genesis( + pub fn into_complete_raw_genesis( self, domain_id: DomainId, domain_runtime_info: DomainRuntimeInfo, - ) -> RawGenesis { + total_issuance: BalanceOf, + initial_balances: Vec<(MultiAccountId, BalanceOf)>, + ) -> Result { let RuntimeObject { mut raw_genesis, .. } = self; raw_genesis.set_domain_id(domain_id); match domain_runtime_info { - DomainRuntimeInfo::EVM { chain_id } => raw_genesis.set_evm_chain_id(chain_id), + DomainRuntimeInfo::EVM { chain_id } => { + raw_genesis.set_evm_chain_id(chain_id); + let initial_balances = initial_balances.into_iter().try_fold( + Vec::<(AccountId20, BalanceOf)>::new(), + |mut balances, (account_id, balance)| { + let account_id = + domain_runtime_primitives::AccountId20Converter::try_convert_back( + account_id, + ) + .ok_or(Error::InvalidAccountIdType)?; + + balances.push((account_id, balance)); + Ok(balances) + }, + )?; + raw_genesis.set_top_storages(derive_initial_balances_storages::( + total_issuance, + initial_balances, + )); + } } - raw_genesis + + Ok(raw_genesis) } } diff --git a/crates/pallet-domains/src/staking.rs b/crates/pallet-domains/src/staking.rs index 11f7aba14f..c3930d4ff9 100644 --- a/crates/pallet-domains/src/staking.rs +++ b/crates/pallet-domains/src/staking.rs @@ -1291,6 +1291,7 @@ pub(crate) mod tests { bundle_slot_probability: (0, 0), target_bundles_per_block: 0, operator_allow_list: OperatorAllowList::Anyone, + initial_balances: Default::default(), }; let domain_obj = DomainObject { @@ -1685,6 +1686,7 @@ pub(crate) mod tests { bundle_slot_probability: (0, 0), target_bundles_per_block: 0, operator_allow_list: OperatorAllowList::Anyone, + initial_balances: Default::default(), }; let domain_obj = DomainObject { @@ -1798,6 +1800,7 @@ pub(crate) mod tests { bundle_slot_probability: (0, 0), target_bundles_per_block: 0, operator_allow_list: OperatorAllowList::Anyone, + initial_balances: 
Default::default(), }; let domain_obj = DomainObject { diff --git a/crates/pallet-domains/src/staking_epoch.rs b/crates/pallet-domains/src/staking_epoch.rs index c6cda6a8a9..c07e7a1d1a 100644 --- a/crates/pallet-domains/src/staking_epoch.rs +++ b/crates/pallet-domains/src/staking_epoch.rs @@ -537,6 +537,7 @@ mod tests { bundle_slot_probability: (0, 0), target_bundles_per_block: 0, operator_allow_list: OperatorAllowList::Anyone, + initial_balances: Default::default(), }; let domain_obj = DomainObject { diff --git a/crates/pallet-domains/src/tests.rs b/crates/pallet-domains/src/tests.rs index 387395cd2d..02a4633787 100644 --- a/crates/pallet-domains/src/tests.rs +++ b/crates/pallet-domains/src/tests.rs @@ -26,7 +26,7 @@ use sp_domains::merkle_tree::MerkleTree; use sp_domains::proof_provider_and_verifier::StorageProofProvider; use sp_domains::storage::RawGenesis; use sp_domains::{ - BundleHeader, DomainId, DomainsHoldIdentifier, ExecutionReceipt, ExtrinsicDigest, + BundleHeader, ChainId, DomainId, DomainsHoldIdentifier, ExecutionReceipt, ExtrinsicDigest, InboxedBundle, InvalidBundleType, OpaqueBundle, OperatorAllowList, OperatorId, OperatorPair, ProofOfElection, RuntimeType, SealedBundleHeader, StakingHoldIdentifier, }; @@ -184,6 +184,8 @@ parameter_types! 
{ pub const MaxNominators: u32 = 5; pub const DomainsPalletId: PalletId = PalletId(*b"domains_"); pub const DomainChainByteFee: Balance = 1; + pub const MaxInitialDomainAccounts: u32 = 5; + pub const MinInitialDomainAccountBalance: Balance = SSC; } pub struct MockRandomness; @@ -225,6 +227,55 @@ impl BlockSlot for DummyBlockSlot { } } +pub struct MockDomainsTransfersTracker; + +impl sp_domains::DomainsTransfersTracker for MockDomainsTransfersTracker { + type Error = (); + + fn initialize_domain_balance( + _domain_id: DomainId, + _amount: Balance, + ) -> Result<(), Self::Error> { + Ok(()) + } + + fn note_transfer( + _from_chain_id: ChainId, + _to_chain_id: ChainId, + _amount: Balance, + ) -> Result<(), Self::Error> { + Ok(()) + } + + fn confirm_transfer( + _from_chain_id: ChainId, + _to_chain_id: ChainId, + _amount: Balance, + ) -> Result<(), Self::Error> { + Ok(()) + } + + fn claim_rejected_transfer( + _from_chain_id: ChainId, + _to_chain_id: ChainId, + _amount: Balance, + ) -> Result<(), Self::Error> { + Ok(()) + } + + fn reject_transfer( + _from_chain_id: ChainId, + _to_chain_id: ChainId, + _amount: Balance, + ) -> Result<(), Self::Error> { + Ok(()) + } + + fn reduce_domain_balance(_domain_id: DomainId, _amount: Balance) -> Result<(), Self::Error> { + Ok(()) + } +} + impl pallet_domains::Config for Test { type RuntimeEvent = RuntimeEvent; type DomainHash = sp_core::H256; @@ -255,6 +306,9 @@ impl pallet_domains::Config for Test { type PalletId = DomainsPalletId; type StorageFee = DummyStorageFee; type BlockSlot = DummyBlockSlot; + type DomainsTransfersTracker = MockDomainsTransfersTracker; + type MaxInitialDomainAccounts = MaxInitialDomainAccounts; + type MinInitialDomainAccountBalance = MinInitialDomainAccountBalance; } pub struct ExtrinsicStorageFees; @@ -375,6 +429,9 @@ impl FraudProofHostFunctions for MockDomainFraudProofExtension { self.maybe_illegal_extrinsic_index, ) } + FraudProofVerificationInfoRequest::StorageKey { .. 
} => { + FraudProofVerificationInfoResponse::StorageKey(None) + } }; Some(response) @@ -554,6 +611,7 @@ pub(crate) fn register_genesis_domain(creator: u128, operator_ids: Vec = fn generate_invalid_block_fees_fraud_proof( domain_id: DomainId, bad_receipt_hash: ReceiptHashFor, - block_fees: domain_runtime_primitives::BlockFees>, + block_fees: sp_domains::BlockFees>, ) -> (FraudProofFor, T::Hash) { let storage_key = sp_domains_fraud_proof::fraud_proof::operator_block_fees_final_key(); let mut root = T::Hash::default(); diff --git a/crates/pallet-runtime-configs/src/lib.rs b/crates/pallet-runtime-configs/src/lib.rs index f408ed6520..d34526bdae 100644 --- a/crates/pallet-runtime-configs/src/lib.rs +++ b/crates/pallet-runtime-configs/src/lib.rs @@ -117,7 +117,7 @@ mod pallet { impl Pallet { /// Change enable domains state. #[pallet::call_index(0)] - #[pallet::weight(::WeightInfo::set_enable_domains())] + #[pallet::weight(< T as Config >::WeightInfo::set_enable_domains())] pub fn set_enable_domains(origin: OriginFor, enable_domains: bool) -> DispatchResult { ensure_root(origin)?; @@ -128,21 +128,21 @@ mod pallet { /// Enable or disable dynamic cost of storage. #[pallet::call_index(1)] - #[pallet::weight(::WeightInfo::set_enable_dynamic_cost_of_storage())] + #[pallet::weight(< T as Config >::WeightInfo::set_enable_dynamic_cost_of_storage())] pub fn set_enable_dynamic_cost_of_storage( origin: OriginFor, enable_dynamic_cost_of_storage: bool, ) -> DispatchResult { ensure_root(origin)?; - EnableBalanceTransfers::::put(enable_dynamic_cost_of_storage); + EnableDynamicCostOfStorage::::put(enable_dynamic_cost_of_storage); Ok(()) } /// Enable or disable balance transfers for all users. 
#[pallet::call_index(2)] - #[pallet::weight(::WeightInfo::set_enable_balance_transfers())] + #[pallet::weight(< T as Config >::WeightInfo::set_enable_balance_transfers())] pub fn set_enable_balance_transfers( origin: OriginFor, enable_balance_transfers: bool, @@ -156,7 +156,7 @@ mod pallet { /// Enable or disable calls from non-root users. #[pallet::call_index(3)] - #[pallet::weight(::WeightInfo::set_enable_non_root_calls())] + #[pallet::weight(< T as Config >::WeightInfo::set_enable_non_root_calls())] pub fn set_enable_non_root_calls( origin: OriginFor, enable_non_root_calls: bool, diff --git a/crates/pallet-subspace/src/mock.rs b/crates/pallet-subspace/src/mock.rs index da8512f466..61c48d2bc4 100644 --- a/crates/pallet-subspace/src/mock.rs +++ b/crates/pallet-subspace/src/mock.rs @@ -38,11 +38,11 @@ use sp_runtime::testing::{Digest, DigestItem, Header, TestXt}; use sp_runtime::traits::{Block as BlockT, Header as _, IdentityLookup}; use sp_runtime::{BuildStorage, Perbill}; use sp_weights::Weight; -use std::iter; use std::marker::PhantomData; use std::num::{NonZeroU32, NonZeroU64, NonZeroUsize}; use std::simd::Simd; use std::sync::{Once, OnceLock}; +use std::{iter, slice}; use subspace_archiving::archiver::{Archiver, NewArchivedSegment}; use subspace_core_primitives::crypto::kzg::{embedded_kzg_settings, Kzg}; use subspace_core_primitives::crypto::Scalar; @@ -464,7 +464,8 @@ pub fn create_signed_vote( sector_metadata_output: &mut plotted_sector_metadata_bytes, downloading_semaphore: None, encoding_semaphore: None, - table_generator: &mut table_generator, + table_generators: slice::from_mut(&mut table_generator), + abort_early: &Default::default(), })) .unwrap(); diff --git a/crates/sp-domains-fraud-proof/src/fraud_proof.rs b/crates/sp-domains-fraud-proof/src/fraud_proof.rs index f8ca5f0a67..b3a9f82e17 100644 --- a/crates/sp-domains-fraud-proof/src/fraud_proof.rs +++ b/crates/sp-domains-fraud-proof/src/fraud_proof.rs @@ -296,6 +296,11 @@ pub enum VerificationError { 
RuntimeCode(String), #[cfg_attr(feature = "thiserror", error("Failed to get domain runtime code"))] FailedToGetDomainRuntimeCode, + #[cfg_attr( + feature = "thiserror", + error("Failed to get domain transfers storage key") + )] + FailedToGetDomainTransfersStorageKey, #[cfg(feature = "std")] #[cfg_attr( feature = "thiserror", @@ -447,6 +452,7 @@ pub enum FraudProof { ValidBundle(ValidBundleProof>), InvalidDomainBlockHash(InvalidDomainBlockHashProof>), InvalidBundles(InvalidBundlesFraudProof>), + InvalidTransfers(InvalidTransfersProof>), // Dummy fraud proof only used in test and benchmark // // NOTE: the `Dummy` must be the last variant, because the `#[cfg(..)]` will apply to @@ -474,6 +480,7 @@ impl FraudProof Self::InvalidBundles(proof) => proof.domain_id, Self::ValidBundle(proof) => proof.domain_id, Self::InvalidDomainBlockHash(proof) => proof.domain_id, + Self::InvalidTransfers(proof) => proof.domain_id, } } @@ -492,6 +499,7 @@ impl FraudProof Self::ValidBundle(proof) => Some(proof.bad_receipt_hash), Self::InvalidBundles(proof) => Some(proof.bad_receipt_hash), Self::InvalidDomainBlockHash(proof) => Some(proof.bad_receipt_hash), + Self::InvalidTransfers(proof) => Some(proof.bad_receipt_hash), } } @@ -623,6 +631,17 @@ pub struct InvalidBlockFeesProof { pub storage_proof: StorageProof, } +/// Represents an invalid transfers proof. +#[derive(Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo)] +pub struct InvalidTransfersProof { + /// The id of the domain this fraud proof targeted + pub domain_id: DomainId, + /// Hash of the bad receipt this fraud proof targeted + pub bad_receipt_hash: ReceiptHash, + /// Storage witness needed for verifying this proof. + pub storage_proof: StorageProof, +} + /// Represents an invalid domain block hash fraud proof. 
#[derive(Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo)] pub struct InvalidDomainBlockHashProof { diff --git a/crates/sp-domains-fraud-proof/src/host_functions.rs b/crates/sp-domains-fraud-proof/src/host_functions.rs index 4fb333129e..90f5ceba1f 100644 --- a/crates/sp-domains-fraud-proof/src/host_functions.rs +++ b/crates/sp-domains-fraud-proof/src/host_functions.rs @@ -1,5 +1,6 @@ use crate::{ FraudProofVerificationInfoRequest, FraudProofVerificationInfoResponse, SetCodeExtrinsic, + StorageKeyRequest, }; use codec::{Decode, Encode}; use domain_block_preprocessor::inherents::extract_domain_runtime_upgrade_code; @@ -308,6 +309,23 @@ where )) } + fn storage_key( + &self, + consensus_block_hash: H256, + domain_id: DomainId, + req: StorageKeyRequest, + ) -> Option> { + let runtime_code = self.get_domain_runtime_code(consensus_block_hash, domain_id)?; + let domain_stateless_runtime = + StatelessRuntime::::new(self.executor.clone(), runtime_code.into()); + Some( + match req { + StorageKeyRequest::Transfers => domain_stateless_runtime.transfers_storage_key(), + } + .expect("Domain Runtime Api should not fail. 
There is no recovery from this; qed."), + ) + } + fn get_domain_election_params( &self, consensus_block_hash: H256, @@ -488,6 +506,11 @@ where transactions_check_result, ) }), + FraudProofVerificationInfoRequest::StorageKey { domain_id, req } => { + Some(FraudProofVerificationInfoResponse::StorageKey( + self.storage_key(consensus_block_hash, domain_id, req), + )) + } } } diff --git a/crates/sp-domains-fraud-proof/src/lib.rs b/crates/sp-domains-fraud-proof/src/lib.rs index f589d1258e..411cd7a72a 100644 --- a/crates/sp-domains-fraud-proof/src/lib.rs +++ b/crates/sp-domains-fraud-proof/src/lib.rs @@ -80,6 +80,13 @@ impl From for TransactionValidity { } } +/// Type that specifies the request of storage keys +#[derive(Debug, Decode, Encode, TypeInfo, PartialEq, Eq, Clone)] +pub enum StorageKeyRequest { + /// Domain's transfers storage key + Transfers, +} + /// Request type to fetch required verification information for fraud proof through Host function. #[derive(Debug, Decode, Encode, TypeInfo, PartialEq, Eq, Clone)] pub enum FraudProofVerificationInfoRequest { @@ -137,6 +144,11 @@ pub enum FraudProofVerificationInfoRequest { /// Storage proof for the keys used in validating the extrinsic storage_proof: StorageProof, }, + /// Request to fetch a specific storage key + StorageKey { + domain_id: DomainId, + req: StorageKeyRequest, + }, } impl PassBy for FraudProofVerificationInfoRequest { @@ -183,6 +195,8 @@ pub enum FraudProofVerificationInfoResponse { OperatorStake(Balance), /// Result of check extrinsics in single context CheckExtrinsicsInSingleContext(Option), + /// Result of the storage key request + StorageKey(Option>), } impl FraudProofVerificationInfoResponse { @@ -282,6 +296,13 @@ impl FraudProofVerificationInfoResponse { _ => None, } } + + pub fn into_storage_key(self) -> Option> { + match self { + FraudProofVerificationInfoResponse::StorageKey(result) => result, + _ => None, + } + } } sp_api::decl_runtime_apis! 
{ diff --git a/crates/sp-domains-fraud-proof/src/tests.rs b/crates/sp-domains-fraud-proof/src/tests.rs index 32550adc34..80a4f2659a 100644 --- a/crates/sp-domains-fraud-proof/src/tests.rs +++ b/crates/sp-domains-fraud-proof/src/tests.rs @@ -2,7 +2,7 @@ use crate::test_ethereum_tx::{ EIP1559UnsignedTransaction, EIP2930UnsignedTransaction, LegacyUnsignedTransaction, }; use codec::Encode; -use domain_runtime_primitives::{Balance, CheckExtrinsicsValidityError, DomainCoreApi}; +use domain_runtime_primitives::{Balance, CheckExtrinsicsValidityError}; use domain_test_service::evm_domain_test_runtime::{ Runtime as TestRuntime, RuntimeCall, UncheckedExtrinsic as RuntimeUncheckedExtrinsic, }; @@ -23,6 +23,7 @@ use sp_api::{ApiExt, ProvideRuntimeApi, TransactionOutcome}; use sp_core::crypto::AccountId32; use sp_core::ecdsa::Pair; use sp_core::{keccak_256, Pair as _, H160, H256, U256}; +use sp_domains::core_api::DomainCoreApi; use sp_runtime::traits::{Extrinsic, Zero}; use sp_runtime::transaction_validity::{InvalidTransaction, TransactionValidityError}; use sp_runtime::OpaqueExtrinsic; @@ -170,8 +171,7 @@ async fn benchmark_bundle_with_evm_tx( const TX_TYPES: u32 = 4; let mut thread_rng = rand::thread_rng(); let between = Uniform::from(0..TX_TYPES); - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert!(bundle.is_some()); + let (slot, _) = ferdie.produce_slot_and_wait_for_bundle_submission().await; produce_block_with!(ferdie.produce_block_with_slot(slot), alice) .await .unwrap(); @@ -379,7 +379,7 @@ async fn storage_change_of_the_same_runtime_instance_should_perserved_cross_runt best_hash, vec![ transfer_to_charlie_with_big_tip_1.clone().into(), - transfer_to_charlie_with_big_tip_2.clone().into() + transfer_to_charlie_with_big_tip_2.clone().into(), ], best_number, best_hash, @@ -389,7 +389,7 @@ async fn storage_change_of_the_same_runtime_instance_should_perserved_cross_runt extrinsic_index: 1, transaction_validity_error: 
TransactionValidityError::Invalid( InvalidTransaction::Payment - ) + ), }) ); @@ -411,7 +411,7 @@ async fn storage_change_of_the_same_runtime_instance_should_perserved_cross_runt extrinsic_index: 0, transaction_validity_error: TransactionValidityError::Invalid( InvalidTransaction::Future - ) + ), }) ); @@ -460,7 +460,7 @@ async fn storage_change_of_the_same_runtime_instance_should_perserved_cross_runt best_hash, vec![transfer_with_big_tip_1.clone().into()], best_number, - best_hash + best_hash, ) .unwrap() .is_ok()); @@ -491,7 +491,7 @@ async fn storage_change_of_the_same_runtime_instance_should_perserved_cross_runt best_hash, vec![transfer_with_big_tip_3.clone().into()], best_number, - best_hash + best_hash, ) .unwrap(), if commit_mode { @@ -754,7 +754,7 @@ async fn test_evm_domain_block_fee() { // Produce a bundle that contains the just sent extrinsic let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert_eq!(bundle.unwrap().extrinsics.len(), 3); + assert_eq!(bundle.extrinsics.len(), 3); produce_block_with!(ferdie.produce_block_with_slot(slot), alice) .await .unwrap(); @@ -762,7 +762,7 @@ async fn test_evm_domain_block_fee() { // Produce one more bundle, this bundle should contains the ER of the previous bundle let (_, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let receipt = bundle.unwrap().into_receipt(); + let receipt = bundle.into_receipt(); assert_eq!(receipt.consensus_block_hash, consensus_block_hash); // All the transaction fee is collected as operator reward diff --git a/crates/sp-domains-fraud-proof/src/verification.rs b/crates/sp-domains-fraud-proof/src/verification.rs index 388a4f768a..a0e173f7c1 100644 --- a/crates/sp-domains-fraud-proof/src/verification.rs +++ b/crates/sp-domains-fraud-proof/src/verification.rs @@ -1,14 +1,14 @@ use crate::fraud_proof::{ InvalidBundlesFraudProof, InvalidExtrinsicsRootProof, InvalidStateTransitionProof, - ValidBundleProof, VerificationError, + 
InvalidTransfersProof, ValidBundleProof, VerificationError, }; use crate::fraud_proof_runtime_interface::get_fraud_proof_verification_info; use crate::{ fraud_proof_runtime_interface, FraudProofVerificationInfoRequest, - FraudProofVerificationInfoResponse, SetCodeExtrinsic, + FraudProofVerificationInfoResponse, SetCodeExtrinsic, StorageKeyRequest, }; use codec::{Decode, Encode}; -use domain_runtime_primitives::{BlockFees, BlockNumber}; +use domain_runtime_primitives::BlockNumber; use hash_db::Hasher; use sp_core::storage::StorageKey; use sp_core::H256; @@ -17,8 +17,9 @@ use sp_domains::extrinsics::{deduplicate_and_shuffle_extrinsics, extrinsics_shuf use sp_domains::proof_provider_and_verifier::StorageProofVerifier; use sp_domains::valued_trie::valued_ordered_trie_root; use sp_domains::{ - BundleValidity, ExecutionReceipt, ExtrinsicDigest, HeaderHashFor, HeaderHashingFor, + BlockFees, BundleValidity, ExecutionReceipt, ExtrinsicDigest, HeaderHashFor, HeaderHashingFor, HeaderNumberFor, InboxedBundle, InvalidBundleType, OperatorPublicKey, SealedBundleHeader, + Transfers, }; use sp_runtime::generic::Digest; use sp_runtime::traits::{ @@ -357,6 +358,61 @@ where Ok(()) } +/// Verifies invalid transfers fraud proof. +pub fn verify_invalid_transfers_fraud_proof< + CBlock, + DomainNumber, + DomainHash, + Balance, + DomainHashing, +>( + bad_receipt: ExecutionReceipt< + NumberFor, + CBlock::Hash, + DomainNumber, + DomainHash, + Balance, + >, + proof: &InvalidTransfersProof, +) -> Result<(), VerificationError> +where + CBlock: BlockT, + CBlock::Hash: Into, + Balance: PartialEq + Decode, + DomainHashing: Hasher, +{ + let InvalidTransfersProof { + domain_id, + storage_proof, + .. 
+ } = proof; + + let storage_key = get_fraud_proof_verification_info( + bad_receipt.consensus_block_hash.into(), + FraudProofVerificationInfoRequest::StorageKey { + domain_id: *domain_id, + req: StorageKeyRequest::Transfers, + }, + ) + .and_then(FraudProofVerificationInfoResponse::into_storage_key) + .ok_or(VerificationError::FailedToGetDomainTransfersStorageKey)?; + let storage_proof = storage_proof.clone(); + + let transfers = StorageProofVerifier::::get_decoded_value::>( + &bad_receipt.final_state_root, + storage_proof, + StorageKey(storage_key), + ) + .map_err(|_| VerificationError::InvalidStorageProof)?; + + // if the transfers match, then this fraud proof is invalid since the transfers in the bad receipt must differ from the actual ones. + if bad_receipt.transfers == transfers { + return Err(VerificationError::InvalidProof); + } + + Ok(()) +} + /// This function checks if this fraud proof is expected against the inboxed bundle entry it is targeting. /// If the entry is expected then it will be returned /// In any other cases VerificationError will be returned diff --git a/crates/sp-domains/src/core_api.rs b/crates/sp-domains/src/core_api.rs new file mode 100644 index 0000000000..f363eb6b52 --- /dev/null +++ b/crates/sp-domains/src/core_api.rs @@ -0,0 +1,82 @@ +use crate::{BlockFees, Transfers}; +use domain_runtime_primitives::{ + opaque, Balance, CheckExtrinsicsValidityError, DecodeExtrinsicError, +}; +use sp_runtime::generic::Era; +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use sp_runtime::Digest; +use sp_std::vec::Vec; +use sp_weights::Weight; +use subspace_core_primitives::U256; +use subspace_runtime_primitives::Moment; + +sp_api::decl_runtime_apis! { + /// Base API that every domain runtime must implement. + pub trait DomainCoreApi { + /// Extracts the optional signer per extrinsic. 
+ fn extract_signer( + extrinsics: Vec<::Extrinsic>, + ) -> Vec<(Option, ::Extrinsic)>; + + fn is_within_tx_range( + extrinsic: &::Extrinsic, + bundle_vrf_hash: &U256, + tx_range: &U256, + ) -> bool; + + /// Returns the intermediate storage roots in an encoded form. + fn intermediate_roots() -> Vec<[u8; 32]>; + + /// Returns the storage root after initializing the block. + fn initialize_block_with_post_state_root(header: &::Header) -> Vec; + + /// Returns the storage root after applying the extrinsic. + fn apply_extrinsic_with_post_state_root(extrinsic: ::Extrinsic) -> Vec; + + /// Returns an encoded extrinsic aiming to upgrade the runtime using given code. + fn construct_set_code_extrinsic(code: Vec) -> Vec; + + /// Returns an encoded extrinsic to set timestamp. + fn construct_timestamp_extrinsic(moment: Moment) -> Block::Extrinsic; + + /// Returns an encoded extrinsic to set domain transaction byte fee. + fn construct_consensus_chain_byte_fee_extrinsic(consensus_chain_byte_fee: Balance) -> Block::Extrinsic; + + /// Returns true if the extrinsic is an inherent extrinsic. + fn is_inherent_extrinsic(extrinsic: &::Extrinsic) -> bool; + + /// Checks the validity of array of extrinsics + pre_dispatch + /// returning failure on first extrinsic that fails runtime call. + /// IMPORTANT: Change `CHECK_EXTRINSICS_AND_DO_PRE_DISPATCH_METHOD_NAME` constant when this method name is changed + fn check_extrinsics_and_do_pre_dispatch(uxts: Vec<::Extrinsic>, block_number: NumberFor, + block_hash: ::Hash) -> Result<(), CheckExtrinsicsValidityError>; + + /// Decodes the domain specific extrinsic from the opaque extrinsic. + fn decode_extrinsic( + opaque_extrinsic: sp_runtime::OpaqueExtrinsic, + ) -> Result<::Extrinsic, DecodeExtrinsicError>; + + /// Returns extrinsic Era if present. + fn extrinsic_era( + extrinsic: &::Extrinsic + ) -> Option; + + /// Returns the extrinsic weight. 
+ fn extrinsic_weight(ext: &Block::Extrinsic) -> Weight; + + /// The accumulated transaction fee of all transactions included in the block. + fn block_fees() -> BlockFees; + + /// Returns the block digest. + fn block_digest() -> Digest; + + /// Returns the consumed weight of the block. + fn block_weight() -> Weight; + + /// Returns the transfers for this domain in the block. + fn transfers() -> Transfers; + + /// Returns the storage key for the Transfers on Domain. + fn transfers_storage_key() -> Vec; + } +} diff --git a/crates/sp-domains/src/lib.rs b/crates/sp-domains/src/lib.rs index 2c0911f5a1..71803689b2 100644 --- a/crates/sp-domains/src/lib.rs +++ b/crates/sp-domains/src/lib.rs @@ -18,6 +18,7 @@ #![cfg_attr(not(feature = "std"), no_std)] pub mod bundle_producer_election; +pub mod core_api; pub mod extrinsics; pub mod merkle_tree; pub mod proof_provider_and_verifier; @@ -34,7 +35,9 @@ use bundle_producer_election::{BundleProducerElectionParams, ProofOfElectionErro use core::num::ParseIntError; use core::ops::{Add, Sub}; use core::str::FromStr; -use domain_runtime_primitives::BlockFees; +use domain_runtime_primitives::MultiAccountId; +use frame_support::storage::storage_prefix; +use frame_support::{Blake2_128Concat, StorageHasher}; use hexlit::hex; use parity_scale_codec::{Codec, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; @@ -254,6 +257,63 @@ impl From for ChainId { } } +#[derive(Clone, Debug, Decode, Default, Encode, Eq, PartialEq, TypeInfo)] +pub struct BlockFees { + /// The consensus chain storage fee + pub consensus_storage_fee: Balance, + /// The domain execution fee including the storage and compute fee on domain chain, + /// tip, and the XDM reward. 
+ pub domain_execution_fee: Balance, + /// Burned balances on domain chain + pub burned_balance: Balance, +} + +impl BlockFees +where + Balance: CheckedAdd, +{ + pub fn new( + domain_execution_fee: Balance, + consensus_storage_fee: Balance, + burned_balance: Balance, + ) -> Self { + BlockFees { + consensus_storage_fee, + domain_execution_fee, + burned_balance, + } + } + + /// Returns the total fees that was collected and burned on the Domain. + pub fn total_fees(&self) -> Option { + self.consensus_storage_fee + .checked_add(&self.domain_execution_fee) + .and_then(|balance| balance.checked_add(&self.burned_balance)) + } +} + +/// Type that holds the transfers(in/out) for a given chain. +#[derive(Debug, Decode, Encode, TypeInfo, PartialEq, Eq, Clone, Default)] +pub struct Transfers { + /// Total transfers that came into the domain. + pub transfers_in: BTreeMap, + /// Total transfers that went out of the domain. + pub transfers_out: BTreeMap, + /// Total transfers from this domain that were reverted. + pub rejected_transfers_claimed: BTreeMap, + /// Total transfers to this domain that were rejected. + pub transfers_rejected: BTreeMap, +} + +impl Transfers { + pub fn is_valid(&self, chain_id: ChainId) -> bool { + !self.transfers_rejected.contains_key(&chain_id) + && !self.transfers_in.contains_key(&chain_id) + && !self.transfers_out.contains_key(&chain_id) + && !self.rejected_transfers_claimed.contains_key(&chain_id) + } +} + #[derive(Debug, Decode, Encode, TypeInfo, PartialEq, Eq, Clone)] pub struct BundleHeader { /// Proof of bundle producer election. @@ -493,7 +553,7 @@ pub struct ExecutionReceipt { /// storage fees are given to the consensus block author. 
pub block_fees: BlockFees, /// List of transfers from this Domain to other chains - pub transfers: BTreeMap, + pub transfers: Transfers, } impl @@ -710,7 +770,7 @@ impl OperatorAllowList { } #[derive(TypeInfo, Debug, Encode, Decode, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct GenesisDomain { +pub struct GenesisDomain { // Domain runtime items pub runtime_name: String, pub runtime_type: RuntimeType, @@ -730,6 +790,9 @@ pub struct GenesisDomain { pub signing_key: OperatorPublicKey, pub minimum_nominator_stake: Balance, pub nomination_tax: Percent, + + // initial balances + pub initial_balances: Vec<(MultiAccountId, Balance)>, } /// Types of runtime pallet domains currently supports @@ -817,7 +880,7 @@ impl DomainsDigestItem for DigestItem { /// TODO: once the chain is launched in mainnet, we should use the Host function for all domain instances. pub(crate) fn evm_chain_id_storage_key() -> StorageKey { StorageKey( - frame_support::storage::storage_prefix( + storage_prefix( // This is the name used for the `pallet_evm_chain_id` in the `construct_runtime` macro // i.e. `EVMChainId: pallet_evm_chain_id = 82,` "EVMChainId".as_bytes(), @@ -828,6 +891,42 @@ pub(crate) fn evm_chain_id_storage_key() -> StorageKey { ) } +/// Total issuance storage for Domains. +/// +/// This function should ideally use Host function to fetch the storage key +/// from the domain runtime. But since the Host function is not available at Genesis, we have to +/// assume the storage keys. +/// TODO: once the chain is launched in mainnet, we should use the Host function for all domain instances. 
+pub fn domain_total_issuance_storage_key() -> StorageKey { + StorageKey( + storage_prefix( + // This is the name used for the `pallet_balances` in the `construct_runtime` macro + "Balances".as_bytes(), + // This is the storage item name used inside the `pallet_balances` + "TotalIssuance".as_bytes(), + ) + .to_vec(), + ) +} + +/// Account info on frame_system on Domains +/// +/// This function should ideally use Host function to fetch the storage key +/// from the domain runtime. But since the Host function is not available at Genesis, we have to +/// assume the storage keys. +/// TODO: once the chain is launched in mainnet, we should use the Host function for all domain instances. +pub fn domain_account_storage_key(who: AccountId) -> StorageKey { + let storage_prefix = storage_prefix("System".as_bytes(), "Account".as_bytes()); + let key_hashed = who.using_encoded(Blake2_128Concat::hash); + + let mut final_key = Vec::with_capacity(storage_prefix.len() + key_hashed.len()); + + final_key.extend_from_slice(&storage_prefix); + final_key.extend_from_slice(key_hashed.as_ref()); + + StorageKey(final_key) +} + /// The storage key of the `SelfDomainId` storage item in the `pallet-domain-id` /// /// Any change to the storage item name or the `pallet-domain-id` name used in the `construct_runtime` @@ -1026,6 +1125,46 @@ impl ExtrinsicDigest { } } +/// Trait that tracks the balances on Domains. +pub trait DomainsTransfersTracker { + type Error; + + /// Initializes the domain balance + fn initialize_domain_balance(domain_id: DomainId, amount: Balance) -> Result<(), Self::Error>; + + /// Notes a transfer between chains. + /// Balance on from_chain_id is reduced if it is a domain chain + fn note_transfer( + from_chain_id: ChainId, + to_chain_id: ChainId, + amount: Balance, + ) -> Result<(), Self::Error>; + + /// Confirms a transfer between chains. 
+ fn confirm_transfer( + from_chain_id: ChainId, + to_chain_id: ChainId, + amount: Balance, + ) -> Result<(), Self::Error>; + + /// Claims a rejected transfer between chains. + fn claim_rejected_transfer( + from_chain_id: ChainId, + to_chain_id: ChainId, + amount: Balance, + ) -> Result<(), Self::Error>; + + /// Rejects an initiated transfer between chains. + fn reject_transfer( + from_chain_id: ChainId, + to_chain_id: ChainId, + amount: Balance, + ) -> Result<(), Self::Error>; + + /// Reduces a given amount from the domain balance + fn reduce_domain_balance(domain_id: DomainId, amount: Balance) -> Result<(), Self::Error>; +} + pub type ExecutionReceiptFor = ExecutionReceipt< NumberFor, ::Hash, diff --git a/crates/sp-domains/src/storage.rs b/crates/sp-domains/src/storage.rs index 1465c210d6..3d6f02b0ee 100644 --- a/crates/sp-domains/src/storage.rs +++ b/crates/sp-domains/src/storage.rs @@ -57,6 +57,12 @@ impl RawGenesis { .insert(evm_chain_id_storage_key(), StorageData(chain_id.encode())); } + pub fn set_top_storages(&mut self, storages: Vec<(StorageKey, StorageData)>) { + for (k, v) in storages { + let _ = self.top.insert(k, v); + } + } + fn set_runtime_code(&mut self, code: Vec) { let _ = self.top.insert( StorageKey(well_known_keys::CODE.to_vec()), diff --git a/crates/subspace-farmer-components/Cargo.toml b/crates/subspace-farmer-components/Cargo.toml index 5b631fbc9d..0c64c49c47 100644 --- a/crates/subspace-farmer-components/Cargo.toml +++ b/crates/subspace-farmer-components/Cargo.toml @@ -25,6 +25,7 @@ futures = "0.3.29" hex = "0.4.3" libc = "0.2.152" parity-scale-codec = "3.6.9" +parking_lot = "0.12.1" rand = "0.8.5" rayon = "1.8.1" schnorrkel = "0.11.4" @@ -45,7 +46,6 @@ winapi = "0.3.9" [dev-dependencies] criterion = "0.5.1" futures = "0.3.29" -parking_lot = "0.12.1" subspace-archiving = { version = "0.1.0", path = "../subspace-archiving" } subspace-proof-of-space = { version = "0.1.0", path = "../subspace-proof-of-space" } diff --git 
a/crates/subspace-farmer-components/benches/auditing.rs b/crates/subspace-farmer-components/benches/auditing.rs index 969e2275f4..d21422436f 100644 --- a/crates/subspace-farmer-components/benches/auditing.rs +++ b/crates/subspace-farmer-components/benches/auditing.rs @@ -128,7 +128,8 @@ pub fn criterion_benchmark(c: &mut Criterion) { sector_metadata_output: &mut plotted_sector_metadata_bytes, downloading_semaphore: black_box(None), encoding_semaphore: black_box(None), - table_generator: &mut table_generator, + table_generators: slice::from_mut(&mut table_generator), + abort_early: &Default::default(), })) .unwrap(); diff --git a/crates/subspace-farmer-components/benches/plotting.rs b/crates/subspace-farmer-components/benches/plotting.rs index 90800c0a1e..eb77765438 100644 --- a/crates/subspace-farmer-components/benches/plotting.rs +++ b/crates/subspace-farmer-components/benches/plotting.rs @@ -35,7 +35,16 @@ fn criterion_benchmark(c: &mut Criterion) { .expect("Not zero; qed"), ) .unwrap(); - let mut table_generator = PosTable::generator(); + let mut table_generators = [ + PosTable::generator(), + PosTable::generator(), + PosTable::generator(), + PosTable::generator(), + PosTable::generator(), + PosTable::generator(), + PosTable::generator(), + PosTable::generator(), + ]; let archived_history_segment = archiver .add_block( AsRef::<[u8]>::as_ref(input.as_ref()).to_vec(), @@ -79,7 +88,8 @@ fn criterion_benchmark(c: &mut Criterion) { sector_metadata_output: black_box(&mut sector_metadata_bytes), downloading_semaphore: black_box(None), encoding_semaphore: black_box(None), - table_generator: black_box(&mut table_generator), + table_generators: black_box(&mut table_generators), + abort_early: &Default::default(), })) .unwrap(); }) diff --git a/crates/subspace-farmer-components/benches/proving.rs b/crates/subspace-farmer-components/benches/proving.rs index d7468dc364..b9e40b1dc5 100644 --- a/crates/subspace-farmer-components/benches/proving.rs +++ 
b/crates/subspace-farmer-components/benches/proving.rs @@ -135,7 +135,8 @@ pub fn criterion_benchmark(c: &mut Criterion) { sector_metadata_output: &mut plotted_sector_metadata_bytes, downloading_semaphore: black_box(None), encoding_semaphore: black_box(None), - table_generator: &mut table_generator, + table_generators: slice::from_mut(&mut table_generator), + abort_early: &Default::default(), })) .unwrap(); diff --git a/crates/subspace-farmer-components/benches/reading.rs b/crates/subspace-farmer-components/benches/reading.rs index 87ec476f4f..5d7ab92202 100644 --- a/crates/subspace-farmer-components/benches/reading.rs +++ b/crates/subspace-farmer-components/benches/reading.rs @@ -6,7 +6,7 @@ use rand::prelude::*; use std::fs::OpenOptions; use std::io::Write; use std::num::{NonZeroU64, NonZeroUsize}; -use std::{env, fs}; +use std::{env, fs, slice}; use subspace_archiving::archiver::Archiver; use subspace_core_primitives::crypto::kzg; use subspace_core_primitives::crypto::kzg::Kzg; @@ -128,7 +128,8 @@ pub fn criterion_benchmark(c: &mut Criterion) { sector_metadata_output: &mut plotted_sector_metadata_bytes, downloading_semaphore: black_box(None), encoding_semaphore: black_box(None), - table_generator: &mut table_generator, + table_generators: slice::from_mut(&mut table_generator), + abort_early: &Default::default(), })) .unwrap(); diff --git a/crates/subspace-farmer-components/src/plotting.rs b/crates/subspace-farmer-components/src/plotting.rs index 837596f70e..58f43e6f27 100644 --- a/crates/subspace-farmer-components/src/plotting.rs +++ b/crates/subspace-farmer-components/src/plotting.rs @@ -1,30 +1,31 @@ use crate::sector::{ - sector_record_chunks_size, sector_size, RawSector, RecordMetadata, SectorContentsMap, - SectorMetadata, SectorMetadataChecksummed, + sector_record_chunks_size, sector_size, EncodedChunksUsed, RawSector, RecordMetadata, + SectorContentsMap, SectorMetadata, SectorMetadataChecksummed, }; use crate::segment_reconstruction::recover_missing_piece; 
use crate::{FarmerProtocolInfo, PieceGetter, PieceGetterRetryPolicy}; -use async_lock::Mutex; +use async_lock::Mutex as AsyncMutex; use backoff::future::retry; use backoff::{Error as BackoffError, ExponentialBackoff}; use futures::stream::FuturesUnordered; use futures::StreamExt; use parity_scale_codec::{Decode, Encode}; +use parking_lot::Mutex; use rayon::prelude::*; use std::mem; use std::simd::Simd; +use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::Duration; use subspace_core_primitives::crypto::kzg::Kzg; use subspace_core_primitives::crypto::{blake3_hash, blake3_hash_parallel, Scalar}; use subspace_core_primitives::{ - Blake3Hash, PieceIndex, PieceOffset, PublicKey, Record, SBucket, SectorId, SectorIndex, + Blake3Hash, PieceIndex, PieceOffset, PosSeed, PublicKey, Record, SBucket, SectorId, SectorIndex, }; use subspace_erasure_coding::ErasureCoding; use subspace_proof_of_space::{Table, TableGenerator}; use thiserror::Error; use tokio::sync::{AcquireError, Semaphore}; -use tokio::task::yield_now; use tracing::{debug, trace, warn}; const RECONSTRUCTION_CONCURRENCY_LIMIT: usize = 1; @@ -58,6 +59,9 @@ pub enum PlottingError { /// Invalid erasure coding instance #[error("Invalid erasure coding instance")] InvalidErasureCodingInstance, + /// No table generators + #[error("No table generators")] + NoTableGenerators, /// Bad sector output size #[error("Bad sector output size: provided {provided}, expected {expected}")] BadSectorOutputSize { @@ -101,6 +105,9 @@ pub enum PlottingError { #[from] error: AcquireError, }, + /// Abort early + #[error("Abort early")] + AbortEarly, } /// Options for plotting a sector. 
@@ -140,8 +147,10 @@ where /// Semaphore for part of the plotting when farmer encodes downloaded sector, should typically /// allow one permit at a time for efficient CPU utilization pub encoding_semaphore: Option<&'a Semaphore>, - /// Proof of space table generator - pub table_generator: &'a mut PosTable::Generator, + /// Proof of space table generators + pub table_generators: &'a mut [PosTable::Generator], + /// Whether encoding should be aborted early + pub abort_early: &'a AtomicBool, } /// Plot a single sector. @@ -170,7 +179,8 @@ where sector_metadata_output, downloading_semaphore, encoding_semaphore, - table_generator, + table_generators, + abort_early, } = options; let _downloading_permit = match downloading_semaphore { @@ -201,10 +211,10 @@ where pieces_in_sector, sector_output, sector_metadata_output, - table_generator, + table_generators, + abort_early, }, ) - .await } /// Opaque sector downloaded and ready for encoding @@ -268,12 +278,12 @@ where }) .collect::>(); - let raw_sector = Mutex::new(RawSector::new(pieces_in_sector)); + let raw_sector = AsyncMutex::new(RawSector::new(pieces_in_sector)); { // This list will be mutated, replacing pieces we have already processed with `None` let incremental_piece_indices = - Mutex::new(piece_indices.iter().copied().map(Some).collect::>()); + AsyncMutex::new(piece_indices.iter().copied().map(Some).collect::>()); retry(default_backoff(), || async { let mut raw_sector = raw_sector.lock().await; @@ -339,11 +349,13 @@ where /// Where plotted sector metadata should be written, vector must either be empty (in which case /// it'll be resized to correct size automatically) or correctly sized from the beginning pub sector_metadata_output: &'a mut Vec, - /// Proof of space table generator - pub table_generator: &'a mut PosTable::Generator, + /// Proof of space table generators + pub table_generators: &'a mut [PosTable::Generator], + /// Whether encoding should be aborted early + pub abort_early: &'a AtomicBool, } -pub async 
fn encode_sector( +pub fn encode_sector( downloaded_sector: DownloadedSector, encoding_options: EncodeSectorOptions<'_, PosTable>, ) -> Result @@ -362,13 +374,18 @@ where pieces_in_sector, sector_output, sector_metadata_output, - table_generator, + table_generators, + abort_early, } = encoding_options; if erasure_coding.max_shards() < Record::NUM_S_BUCKETS { return Err(PlottingError::InvalidErasureCodingInstance); } + if table_generators.is_empty() { + return Err(PlottingError::NoTableGenerators); + } + let sector_size = sector_size(pieces_in_sector); if !sector_output.is_empty() && sector_output.len() != sector_size { @@ -388,94 +405,51 @@ where } let mut sector_contents_map = SectorContentsMap::new(pieces_in_sector); - let mut chunks_scratch = Vec::with_capacity(Record::NUM_S_BUCKETS); - - for ((piece_offset, record), mut encoded_chunks_used) in (PieceOffset::ZERO..) - .zip(raw_sector.records.iter_mut()) - .zip(sector_contents_map.iter_record_bitfields_mut()) { - // Derive PoSpace table (use parallel mode because multiple tables concurrently will use - // too much RAM) - let pos_table = table_generator.generate_parallel( - §or_id.derive_evaluation_seed(piece_offset, farmer_protocol_info.history_size), + let iter = Mutex::new( + (PieceOffset::ZERO..) 
+ .zip(raw_sector.records.iter_mut()) + .zip(sector_contents_map.iter_record_bitfields_mut()), ); - let source_record_chunks = record - .iter() - .map(|scalar_bytes| { - Scalar::try_from(scalar_bytes).expect( - "Piece getter must returns valid pieces of history that contain proper \ - scalar bytes; qed", - ) - }) - .collect::>(); - // Erasure code source record chunks - let parity_record_chunks = erasure_coding - .extend(&source_record_chunks) - .expect("Instance was verified to be able to work with this many values earlier; qed"); - - // For every erasure coded chunk check if there is quality present, if so then encode - // with PoSpace quality bytes and set corresponding `quality_present` bit to `true` - (u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX)) - .into_par_iter() - .map(SBucket::from) - .zip( - source_record_chunks - .par_iter() - .interleave(&parity_record_chunks), - ) - .map(|(s_bucket, record_chunk)| { - let proof = pos_table.find_proof(s_bucket.into())?; - - Some(Simd::from(record_chunk.to_bytes()) ^ Simd::from(proof.hash())) - }) - .collect_into_vec(&mut chunks_scratch); - let num_successfully_encoded_chunks = chunks_scratch - .drain(..) 
- .zip(encoded_chunks_used.iter_mut()) - .filter_map(|(maybe_encoded_chunk, mut encoded_chunk_used)| { - let encoded_chunk = maybe_encoded_chunk?; - - *encoded_chunk_used = true; + rayon::scope(|scope| { + for table_generator in table_generators { + scope.spawn(|_scope| { + let mut chunks_scratch = Vec::with_capacity(Record::NUM_S_BUCKETS); + + loop { + // This instead of `while` above because otherwise mutex will be held for + // the duration of the loop and will limit concurrency to 1 table generator + let Some(((piece_offset, record), encoded_chunks_used)) = + iter.lock().next() + else { + return; + }; + let pos_seed = sector_id.derive_evaluation_seed( + piece_offset, + farmer_protocol_info.history_size, + ); + + record_encoding::( + &pos_seed, + record, + encoded_chunks_used, + table_generator, + erasure_coding, + &mut chunks_scratch, + ); + + if abort_early.load(Ordering::Relaxed) { + return; + } + } + }); + } + }); + } - Some(encoded_chunk) - }) - // Make sure above filter function (and corresponding `encoded_chunk_used` update) - // happen at most as many times as there is number of chunks in the record, - // otherwise `n+1` iterations could happen and update extra `encoded_chunk_used` - // unnecessarily causing issues down the line - .take(record.len()) - .zip(record.iter_mut()) - // Write encoded chunk back so we can reuse original allocation - .map(|(input_chunk, output_chunk)| { - *output_chunk = input_chunk.to_array(); - }) - .count(); - - // In some cases there is not enough PoSpace qualities available, in which case we add - // remaining number of unencoded erasure coded record chunks to the end - source_record_chunks - .iter() - .zip(&parity_record_chunks) - .flat_map(|(a, b)| [a, b]) - .zip(encoded_chunks_used.iter()) - // Skip chunks that were used previously - .filter_map(|(record_chunk, encoded_chunk_used)| { - if *encoded_chunk_used { - None - } else { - Some(record_chunk) - } - }) - // First `num_successfully_encoded_chunks` chunks are encoded 
- .zip(record.iter_mut().skip(num_successfully_encoded_chunks)) - // Write necessary number of unencoded chunks at the end - .for_each(|(input_chunk, output_chunk)| { - *output_chunk = input_chunk.to_bytes(); - }); - - // Give a chance to interrupt plotting if necessary in between pieces - yield_now().await + if abort_early.load(Ordering::Acquire) { + return Err(PlottingError::AbortEarly); } sector_output.resize(sector_size, 0); @@ -560,6 +534,95 @@ where }) } +fn record_encoding( + pos_seed: &PosSeed, + record: &mut Record, + mut encoded_chunks_used: EncodedChunksUsed<'_>, + table_generator: &mut PosTable::Generator, + erasure_coding: &ErasureCoding, + chunks_scratch: &mut Vec>>, +) where + PosTable: Table, +{ + // Derive PoSpace table + let pos_table = table_generator.generate_parallel(pos_seed); + + let source_record_chunks = record + .iter() + .map(|scalar_bytes| { + Scalar::try_from(scalar_bytes).expect( + "Piece getter must returns valid pieces of history that contain proper \ + scalar bytes; qed", + ) + }) + .collect::>(); + // Erasure code source record chunks + let parity_record_chunks = erasure_coding + .extend(&source_record_chunks) + .expect("Instance was verified to be able to work with this many values earlier; qed"); + + chunks_scratch.clear(); + // For every erasure coded chunk check if there is quality present, if so then encode + // with PoSpace quality bytes and set corresponding `quality_present` bit to `true` + (u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX)) + .into_par_iter() + .map(SBucket::from) + .zip( + source_record_chunks + .par_iter() + .interleave(&parity_record_chunks), + ) + .map(|(s_bucket, record_chunk)| { + let proof = pos_table.find_proof(s_bucket.into())?; + + Some(Simd::from(record_chunk.to_bytes()) ^ Simd::from(proof.hash())) + }) + .collect_into_vec(chunks_scratch); + let num_successfully_encoded_chunks = chunks_scratch + .drain(..) 
+ .zip(encoded_chunks_used.iter_mut()) + .filter_map(|(maybe_encoded_chunk, mut encoded_chunk_used)| { + let encoded_chunk = maybe_encoded_chunk?; + + *encoded_chunk_used = true; + + Some(encoded_chunk) + }) + // Make sure above filter function (and corresponding `encoded_chunk_used` update) + // happen at most as many times as there is number of chunks in the record, + // otherwise `n+1` iterations could happen and update extra `encoded_chunk_used` + // unnecessarily causing issues down the line + .take(record.len()) + .zip(record.iter_mut()) + // Write encoded chunk back so we can reuse original allocation + .map(|(input_chunk, output_chunk)| { + *output_chunk = input_chunk.to_array(); + }) + .count(); + + // In some cases there is not enough PoSpace qualities available, in which case we add + // remaining number of unencoded erasure coded record chunks to the end + source_record_chunks + .iter() + .zip(&parity_record_chunks) + .flat_map(|(a, b)| [a, b]) + .zip(encoded_chunks_used.iter()) + // Skip chunks that were used previously + .filter_map(|(record_chunk, encoded_chunk_used)| { + if *encoded_chunk_used { + None + } else { + Some(record_chunk) + } + }) + // First `num_successfully_encoded_chunks` chunks are encoded + .zip(record.iter_mut().skip(num_successfully_encoded_chunks)) + // Write necessary number of unencoded chunks at the end + .for_each(|(input_chunk, output_chunk)| { + *output_chunk = input_chunk.to_bytes(); + }); +} + async fn download_sector_internal( raw_sector: &mut RawSector, piece_getter: &PG, diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs index 9848857a9f..220858e99f 100644 --- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs +++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs @@ -2,13 +2,13 @@ mod dsn; mod metrics; use crate::commands::farm::dsn::configure_dsn; -use 
crate::commands::farm::metrics::FarmerMetrics; +use crate::commands::farm::metrics::{FarmerMetrics, SectorState}; use crate::utils::shutdown_signal; use anyhow::anyhow; use bytesize::ByteSize; use clap::{Parser, ValueHint}; use futures::channel::oneshot; -use futures::stream::FuturesUnordered; +use futures::stream::{FuturesOrdered, FuturesUnordered}; use futures::{FutureExt, StreamExt}; use parking_lot::Mutex; use prometheus_client::registry::Registry; @@ -22,14 +22,15 @@ use std::sync::Arc; use subspace_core_primitives::crypto::kzg::{embedded_kzg_settings, Kzg}; use subspace_core_primitives::{PublicKey, Record, SectorIndex}; use subspace_erasure_coding::ErasureCoding; -use subspace_farmer::piece_cache::PieceCache; +use subspace_farmer::farmer_cache::FarmerCache; use subspace_farmer::single_disk_farm::farming::FarmingNotification; use subspace_farmer::single_disk_farm::{ - SectorPlottingDetails, SectorUpdate, SingleDiskFarm, SingleDiskFarmError, SingleDiskFarmOptions, + SectorExpirationDetails, SectorPlottingDetails, SectorUpdate, SingleDiskFarm, + SingleDiskFarmError, SingleDiskFarmOptions, }; use subspace_farmer::utils::farmer_piece_getter::FarmerPieceGetter; use subspace_farmer::utils::piece_validator::SegmentCommitmentPieceValidator; -use subspace_farmer::utils::readers_and_pieces::ReadersAndPieces; +use subspace_farmer::utils::plotted_pieces::PlottedPieces; use subspace_farmer::utils::ss58::parse_ss58_reward_address; use subspace_farmer::utils::{ all_cpu_cores, create_plotting_thread_pool_manager, parse_cpu_cores_sets, @@ -111,7 +112,9 @@ pub(crate) struct FarmingArgs { prometheus_listen_on: Vec, /// Defines how many sectors farmer will download concurrently, allows to limit memory usage of /// the plotting process, defaults to `--sector-encoding-concurrency` + 1 to download future - /// sector ahead of time + /// sector ahead of time. + /// + /// Increase will result in higher memory usage. 
#[arg(long)] sector_downloading_concurrency: Option, /// Defines how many sectors farmer will encode concurrently, defaults to 1 on UMA system and @@ -119,8 +122,15 @@ pub(crate) struct FarmingArgs { /// restricted by /// `--sector-downloading-concurrency` and setting this option higher than /// `--sector-downloading-concurrency` will have no effect. + /// + /// Increase will result in higher memory usage. #[arg(long)] sector_encoding_concurrency: Option, + /// Defines how many record farmer will encode in a single sector concurrently, defaults to one + /// record per 2 cores, but not more than 8 in total. Higher concurrency means higher memory + /// usage and typically more efficient CPU utilization. + #[arg(long)] + record_encoding_concurrency: Option, /// Allows to enable farming during initial plotting. Not used by default on machines with 8 or /// less logical cores because plotting is so intense on CPU and memory that farming will likely /// not work properly, yet it will significantly impact plotting speed, delaying the time when @@ -174,6 +184,9 @@ pub(crate) struct FarmingArgs { /// each with a pair of CPU cores. 
#[arg(long, conflicts_with_all = &["sector_encoding_concurrency", "replotting_thread_pool_size"])] replotting_cpu_cores: Option, + /// Disable farm locking, for example if file system doesn't support it + #[arg(long)] + disable_farm_locking: bool, } fn cache_percentage_parser(s: &str) -> anyhow::Result { @@ -317,12 +330,14 @@ where prometheus_listen_on, sector_downloading_concurrency, sector_encoding_concurrency, + record_encoding_concurrency, farm_during_initial_plotting, farming_thread_pool_size, plotting_thread_pool_size, plotting_cpu_cores, replotting_thread_pool_size, replotting_cpu_cores, + disable_farm_locking, } = farming_args; // Override flags with `--dev` @@ -359,7 +374,7 @@ where None }; - let readers_and_pieces = Arc::new(Mutex::new(None)); + let plotted_pieces = Arc::new(Mutex::new(None)); info!(url = %node_rpc_url, "Connecting to node RPC"); let node_client = NodeRpcClient::new(&node_rpc_url).await?; @@ -379,7 +394,7 @@ where let keypair = derive_libp2p_keypair(identity.secret_key()); let peer_id = keypair.public().to_peer_id(); - let (piece_cache, piece_cache_worker) = PieceCache::new(node_client.clone(), peer_id); + let (farmer_cache, farmer_cache_worker) = FarmerCache::new(node_client.clone(), peer_id); // Metrics let mut prometheus_metrics_registry = Registry::default(); @@ -396,9 +411,9 @@ where first_farm_directory, keypair, dsn, - Arc::downgrade(&readers_and_pieces), + Arc::downgrade(&plotted_pieces), node_client.clone(), - piece_cache.clone(), + farmer_cache.clone(), should_start_prometheus_server.then_some(&mut prometheus_metrics_registry), )? 
}; @@ -428,20 +443,20 @@ where )); let piece_provider = PieceProvider::new(node.clone(), validator.clone()); - let piece_getter = Arc::new(FarmerPieceGetter::new( + let piece_getter = FarmerPieceGetter::new( piece_provider, - piece_cache.clone(), + farmer_cache.clone(), node_client.clone(), - Arc::clone(&readers_and_pieces), - )); + Arc::clone(&plotted_pieces), + ); - let piece_cache_worker_fut = run_future_in_dedicated_thread( + let farmer_cache_worker_fut = run_future_in_dedicated_thread( { - let future = piece_cache_worker.run(piece_getter.clone()); + let future = farmer_cache_worker.run(piece_getter.downgrade()); move || future }, - "cache-worker".to_string(), + "farmer-cache-worker".to_string(), )?; let mut single_disk_farms = Vec::with_capacity(disk_farms.len()); @@ -524,6 +539,15 @@ where .unwrap_or(plotting_thread_pool_core_indices.len() + 1), )); + let record_encoding_concurrency = record_encoding_concurrency.unwrap_or_else(|| { + let cpu_cores = plotting_thread_pool_core_indices + .first() + .expect("Guaranteed to have some CPU cores; qed"); + + NonZeroUsize::new((cpu_cores.cpu_cores().len() / 2).min(8)) + .expect("Guaranteed to have some CPU cores; qed") + }); + let plotting_thread_pool_manager = create_plotting_thread_pool_manager( plotting_thread_pool_core_indices .into_iter() @@ -554,10 +578,12 @@ where piece_getter: piece_getter.clone(), cache_percentage, downloading_semaphore: Arc::clone(&downloading_semaphore), + record_encoding_concurrency, farm_during_initial_plotting, farming_thread_pool_size, plotting_thread_pool_manager: plotting_thread_pool_manager.clone(), plotting_delay: Some(plotting_delay_receiver), + disable_farm_locking, }, disk_farm_index, ); @@ -600,7 +626,7 @@ where single_disk_farms.push(single_disk_farm); } - let cache_acknowledgement_receiver = piece_cache + let cache_acknowledgement_receiver = farmer_cache .replace_backing_caches( single_disk_farms .iter() @@ -608,7 +634,7 @@ where .collect(), ) .await; - drop(piece_cache); + 
drop(farmer_cache); // Wait for cache initialization before starting plotting tokio::spawn(async move { @@ -630,7 +656,7 @@ where // Collect already plotted pieces { - let mut future_readers_and_pieces = ReadersAndPieces::new(piece_readers); + let mut future_plotted_pieces = PlottedPieces::new(piece_readers); for (disk_farm_index, single_disk_farm) in single_disk_farms.iter().enumerate() { let disk_farm_index = disk_farm_index.try_into().map_err(|_error| { @@ -645,7 +671,7 @@ where .for_each( |(sector_index, plotted_sector_result)| match plotted_sector_result { Ok(plotted_sector) => { - future_readers_and_pieces.add_sector(disk_farm_index, &plotted_sector); + future_plotted_pieces.add_sector(disk_farm_index, &plotted_sector); } Err(error) => { error!( @@ -659,19 +685,32 @@ where ); } - readers_and_pieces.lock().replace(future_readers_and_pieces); + plotted_pieces.lock().replace(future_plotted_pieces); } info!("Finished collecting already plotted pieces successfully"); + let total_and_plotted_sectors = single_disk_farms + .iter() + .map(|single_disk_farm| async { + let total_sector_count = single_disk_farm.total_sectors_count(); + let plotted_sectors_count = single_disk_farm.plotted_sectors_count().await; + + (total_sector_count, plotted_sectors_count) + }) + .collect::>() + .collect::>() + .await; + let mut single_disk_farms_stream = single_disk_farms .into_iter() .enumerate() - .map(|(disk_farm_index, single_disk_farm)| { + .zip(total_and_plotted_sectors) + .map(|((disk_farm_index, single_disk_farm), sector_counts)| { let disk_farm_index = disk_farm_index.try_into().expect( "More than 256 plots are not supported, this is checked above already; qed", ); - let readers_and_pieces = Arc::clone(&readers_and_pieces); + let plotted_pieces = Arc::clone(&plotted_pieces); let span = info_span!("", %disk_farm_index); // Collect newly plotted pieces @@ -681,18 +720,29 @@ where let _span_guard = span.enter(); { - let mut readers_and_pieces = readers_and_pieces.lock(); - let 
readers_and_pieces = readers_and_pieces + let mut plotted_pieces = plotted_pieces.lock(); + let plotted_pieces = plotted_pieces .as_mut() .expect("Initial value was populated above; qed"); if let Some(old_plotted_sector) = &maybe_old_plotted_sector { - readers_and_pieces.delete_sector(disk_farm_index, old_plotted_sector); + plotted_pieces.delete_sector(disk_farm_index, old_plotted_sector); } - readers_and_pieces.add_sector(disk_farm_index, plotted_sector); + plotted_pieces.add_sector(disk_farm_index, plotted_sector); } }; + let (total_sector_count, plotted_sectors_count) = sector_counts; + farmer_metrics.update_sectors_total( + single_disk_farm.id(), + total_sector_count - plotted_sectors_count, + SectorState::NotPlotted, + ); + farmer_metrics.update_sectors_total( + single_disk_farm.id(), + plotted_sectors_count, + SectorState::Plotted, + ); single_disk_farm .on_sector_update(Arc::new({ let single_disk_farm_id = *single_disk_farm.id(); @@ -732,8 +782,24 @@ where on_plotted_sector_callback(plotted_sector, old_plotted_sector); farmer_metrics.observe_sector_plotting_time(&single_disk_farm_id, time); farmer_metrics.sector_plotted.inc(); + farmer_metrics + .update_sector_state(&single_disk_farm_id, SectorState::Plotted); + } + SectorUpdate::Expiration(SectorExpirationDetails::AboutToExpire) => { + farmer_metrics.update_sector_state( + &single_disk_farm_id, + SectorState::AboutToExpire, + ); + } + SectorUpdate::Expiration(SectorExpirationDetails::Expired) => { + farmer_metrics + .update_sector_state(&single_disk_farm_id, SectorState::Expired); + } + SectorUpdate::Expiration(SectorExpirationDetails::Determined { + .. 
+ }) => { + // Not interested in here } - _ => {} } })) .detach(); @@ -770,7 +836,7 @@ where // Drop original instance such that the only remaining instances are in `SingleDiskFarm` // event handlers - drop(readers_and_pieces); + drop(plotted_pieces); let farm_fut = run_future_in_dedicated_thread( move || async move { @@ -792,11 +858,11 @@ where // This defines order in which things are dropped let networking_fut = networking_fut; let farm_fut = farm_fut; - let piece_cache_worker_fut = piece_cache_worker_fut; + let farmer_cache_worker_fut = farmer_cache_worker_fut; let networking_fut = pin!(networking_fut); let farm_fut = pin!(farm_fut); - let piece_cache_worker_fut = pin!(piece_cache_worker_fut); + let farmer_cache_worker_fut = pin!(farmer_cache_worker_fut); futures::select!( // Signal future @@ -813,8 +879,8 @@ where }, // Piece cache worker future - _ = piece_cache_worker_fut.fuse() => { - info!("Piece cache worker exited.") + _ = farmer_cache_worker_fut.fuse() => { + info!("Farmer cache worker exited.") }, ); diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm/dsn.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm/dsn.rs index 9b53350aaa..eb94cbe32a 100644 --- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm/dsn.rs +++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm/dsn.rs @@ -4,9 +4,9 @@ use prometheus_client::registry::Registry; use std::collections::HashSet; use std::path::Path; use std::sync::{Arc, Weak}; +use subspace_farmer::farmer_cache::FarmerCache; use subspace_farmer::node_client::NodeClientExt; -use subspace_farmer::piece_cache::PieceCache; -use subspace_farmer::utils::readers_and_pieces::ReadersAndPieces; +use subspace_farmer::utils::plotted_pieces::PlottedPieces; use subspace_farmer::{NodeClient, NodeRpcClient, KNOWN_PEERS_CACHE_SIZE}; use subspace_networking::libp2p::identity::Keypair; use subspace_networking::libp2p::kad::RecordKey; @@ -43,11 +43,11 @@ pub(super) fn configure_dsn( 
external_addresses, disable_bootstrap_on_start, }: DsnArgs, - weak_readers_and_pieces: Weak>>, + weak_plotted_pieces: Weak>>, node_client: NodeRpcClient, - piece_cache: PieceCache, + farmer_cache: FarmerCache, prometheus_metrics_registry: Option<&mut Registry>, -) -> Result<(Node, NodeRunner), anyhow::Error> { +) -> Result<(Node, NodeRunner), anyhow::Error> { let networking_parameters_registry = KnownPeersManager::new(KnownPeersManagerConfig { path: Some(base_path.join("known_addresses.bin").into_boxed_path()), ignore_peer_list: strip_peer_id(bootstrap_nodes.clone()) @@ -62,7 +62,7 @@ pub(super) fn configure_dsn( let default_config = Config::new( protocol_prefix, keypair, - piece_cache.clone(), + farmer_cache.clone(), prometheus_metrics_registry, ); let config = Config { @@ -74,14 +74,14 @@ pub(super) fn configure_dsn( PieceByIndexRequestHandler::create(move |_, &PieceByIndexRequest { piece_index }| { debug!(?piece_index, "Piece request received. Trying cache..."); - let weak_readers_and_pieces = weak_readers_and_pieces.clone(); - let piece_cache = piece_cache.clone(); + let weak_plotted_pieces = weak_plotted_pieces.clone(); + let farmer_cache = farmer_cache.clone(); async move { let key = RecordKey::from(piece_index.to_multihash()); - let piece_from_store = piece_cache.get_piece(key).await; + let piece_from_cache = farmer_cache.get_piece(key).await; - if let Some(piece) = piece_from_store { + if let Some(piece) = piece_from_cache { Some(PieceByIndexResponse { piece: Some(piece) }) } else { debug!( @@ -90,16 +90,16 @@ pub(super) fn configure_dsn( ); let read_piece_fut = { - let readers_and_pieces = match weak_readers_and_pieces.upgrade() { - Some(readers_and_pieces) => readers_and_pieces, + let plotted_pieces = match weak_plotted_pieces.upgrade() { + Some(plotted_pieces) => plotted_pieces, None => { debug!("A readers and pieces are already dropped"); return None; } }; - let readers_and_pieces = readers_and_pieces.lock(); - let readers_and_pieces = match 
readers_and_pieces.as_ref() { - Some(readers_and_pieces) => readers_and_pieces, + let plotted_pieces = plotted_pieces.lock(); + let plotted_pieces = match plotted_pieces.as_ref() { + Some(plotted_pieces) => plotted_pieces, None => { debug!( ?piece_index, @@ -109,9 +109,7 @@ pub(super) fn configure_dsn( } }; - readers_and_pieces - .read_piece(&piece_index)? - .in_current_span() + plotted_pieces.read_piece(&piece_index)?.in_current_span() }; let piece = read_piece_fut.await; diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm/metrics.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm/metrics.rs index 6be9d3c5ec..4de52aa86a 100644 --- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm/metrics.rs +++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm/metrics.rs @@ -1,12 +1,34 @@ use prometheus_client::metrics::counter::Counter; use prometheus_client::metrics::family::Family; +use prometheus_client::metrics::gauge::Gauge; use prometheus_client::metrics::histogram::{exponential_buckets, Histogram}; use prometheus_client::registry::{Registry, Unit}; -use std::sync::atomic::AtomicU64; +use std::fmt; +use std::sync::atomic::{AtomicI64, AtomicU64}; use std::time::Duration; +use subspace_core_primitives::SectorIndex; use subspace_farmer::single_disk_farm::farming::ProvingResult; use subspace_farmer::single_disk_farm::{FarmingError, SingleDiskFarmId}; +#[derive(Debug, Copy, Clone)] +pub(super) enum SectorState { + NotPlotted, + Plotted, + AboutToExpire, + Expired, +} + +impl fmt::Display for SectorState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(match self { + Self::NotPlotted => "NotPlotted", + Self::Plotted => "Plotted", + Self::AboutToExpire => "AboutToExpire", + Self::Expired => "Expired", + }) + } +} + #[derive(Debug, Clone)] pub(super) struct FarmerMetrics { auditing_time: Family, Histogram>, @@ -16,6 +38,7 @@ pub(super) struct FarmerMetrics { sector_encoding_time: Family, 
Histogram>, sector_writing_time: Family, Histogram>, sector_plotting_time: Family, Histogram>, + sectors_total: Family, Gauge>, pub(super) sector_downloading: Counter, pub(super) sector_downloaded: Counter, pub(super) sector_encoding: Counter, @@ -31,7 +54,7 @@ impl FarmerMetrics { let sub_registry = registry.sub_registry_with_prefix("subspace_farmer"); let auditing_time = Family::<_, _>::new_with_constructor(|| { - Histogram::new(exponential_buckets(0.0001, 2.0, 15)) + Histogram::new(exponential_buckets(0.0002, 2.0, 15)) }); sub_registry.register_with_unit( @@ -42,7 +65,7 @@ impl FarmerMetrics { ); let proving_time = Family::<_, _>::new_with_constructor(|| { - Histogram::new(exponential_buckets(0.0001, 2.0, 15)) + Histogram::new(exponential_buckets(0.0002, 2.0, 15)) }); sub_registry.register_with_unit( @@ -61,7 +84,7 @@ impl FarmerMetrics { ); let sector_downloading_time = Family::<_, _>::new_with_constructor(|| { - Histogram::new(exponential_buckets(0.0001, 2.0, 15)) + Histogram::new(exponential_buckets(0.1, 2.0, 15)) }); sub_registry.register_with_unit( @@ -72,7 +95,7 @@ impl FarmerMetrics { ); let sector_encoding_time = Family::<_, _>::new_with_constructor(|| { - Histogram::new(exponential_buckets(0.0001, 2.0, 15)) + Histogram::new(exponential_buckets(0.1, 2.0, 15)) }); sub_registry.register_with_unit( @@ -83,7 +106,7 @@ impl FarmerMetrics { ); let sector_writing_time = Family::<_, _>::new_with_constructor(|| { - Histogram::new(exponential_buckets(0.0001, 2.0, 15)) + Histogram::new(exponential_buckets(0.0002, 2.0, 15)) }); sub_registry.register_with_unit( @@ -94,7 +117,7 @@ impl FarmerMetrics { ); let sector_plotting_time = Family::<_, _>::new_with_constructor(|| { - Histogram::new(exponential_buckets(0.0001, 2.0, 15)) + Histogram::new(exponential_buckets(0.1, 2.0, 15)) }); sub_registry.register_with_unit( @@ -104,6 +127,15 @@ impl FarmerMetrics { sector_plotting_time.clone(), ); + let sectors_total = Family::<_, _>::new_with_constructor(Gauge::<_, 
_>::default); + + sub_registry.register_with_unit( + "sectors_total", + "Total number of sectors with corresponding state", + Unit::Other("sectors".to_string()), + sectors_total.clone(), + ); + let sector_downloading = Counter::<_, _>::default(); sub_registry.register_with_unit( @@ -117,7 +149,7 @@ impl FarmerMetrics { sub_registry.register_with_unit( "sector_downloaded_counter", - "Number of sectors being downloaded", + "Number of downloaded sectors", Unit::Other("sectors".to_string()), sector_downloaded.clone(), ); @@ -126,7 +158,7 @@ impl FarmerMetrics { sub_registry.register_with_unit( "sector_encoding_counter", - "Number of sectors being downloaded", + "Number of sectors being encoded", Unit::Other("sectors".to_string()), sector_encoding.clone(), ); @@ -135,7 +167,7 @@ impl FarmerMetrics { sub_registry.register_with_unit( "sector_encoded_counter", - "Number of sectors being downloaded", + "Number of encoded sectors", Unit::Other("sectors".to_string()), sector_encoded.clone(), ); @@ -144,7 +176,7 @@ impl FarmerMetrics { sub_registry.register_with_unit( "sector_writing_counter", - "Number of sectors being downloaded", + "Number of sectors being written", Unit::Other("sectors".to_string()), sector_writing.clone(), ); @@ -153,7 +185,7 @@ impl FarmerMetrics { sub_registry.register_with_unit( "sector_written_counter", - "Number of sectors being downloaded", + "Number of written sectors", Unit::Other("sectors".to_string()), sector_written.clone(), ); @@ -162,7 +194,7 @@ impl FarmerMetrics { sub_registry.register_with_unit( "sector_plotting_counter", - "Number of sectors being downloaded", + "Number of sectors being plotted", Unit::Other("sectors".to_string()), sector_plotting.clone(), ); @@ -171,7 +203,7 @@ impl FarmerMetrics { sub_registry.register_with_unit( "sector_plotted_counter", - "Number of sectors being downloaded", + "Number of plotted sectors", Unit::Other("sectors".to_string()), sector_plotted.clone(), ); @@ -184,6 +216,7 @@ impl FarmerMetrics { 
sector_encoding_time, sector_writing_time, sector_plotting_time, + sectors_total, sector_downloading, sector_downloaded, sector_encoding, @@ -235,6 +268,79 @@ impl FarmerMetrics { .inc(); } + pub(super) fn update_sectors_total( + &self, + single_disk_farm_id: &SingleDiskFarmId, + sectors: SectorIndex, + state: SectorState, + ) { + self.sectors_total + .get_or_create(&vec![ + ("farm_id".to_string(), single_disk_farm_id.to_string()), + ("state".to_string(), state.to_string()), + ]) + .set(i64::from(sectors)); + } + + pub(super) fn update_sector_state( + &self, + single_disk_farm_id: &SingleDiskFarmId, + state: SectorState, + ) { + self.sectors_total + .get_or_create(&vec![ + ("farm_id".to_string(), single_disk_farm_id.to_string()), + ("state".to_string(), state.to_string()), + ]) + .inc(); + match state { + SectorState::NotPlotted => { + // Never called, doesn't make sense + } + SectorState::Plotted => { + // Separate blocks in because of mutex guard returned by `get_or_create` resulting + // in deadlock otherwise + { + let not_plotted_sectors = self.sectors_total.get_or_create(&vec![ + ("farm_id".to_string(), single_disk_farm_id.to_string()), + ("state".to_string(), SectorState::NotPlotted.to_string()), + ]); + if not_plotted_sectors.get() > 0 { + // Initial plotting + not_plotted_sectors.dec(); + return; + } + } + { + let expired_sectors = self.sectors_total.get_or_create(&vec![ + ("farm_id".to_string(), single_disk_farm_id.to_string()), + ("state".to_string(), SectorState::Expired.to_string()), + ]); + if expired_sectors.get() > 0 { + // Replaced expired sector + expired_sectors.dec(); + return; + } + } + // Replaced about to expire sector + self.sectors_total + .get_or_create(&vec![ + ("farm_id".to_string(), single_disk_farm_id.to_string()), + ("state".to_string(), SectorState::AboutToExpire.to_string()), + ]) + .dec(); + } + SectorState::AboutToExpire | SectorState::Expired => { + self.sectors_total + .get_or_create(&vec![ + ("farm_id".to_string(), 
single_disk_farm_id.to_string()), + ("state".to_string(), SectorState::Plotted.to_string()), + ]) + .dec(); + } + } + } + pub(super) fn observe_sector_downloading_time( &self, single_disk_farm_id: &SingleDiskFarmId, diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/commands/scrub.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/scrub.rs index 44f7bb70c6..8af94c906c 100644 --- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/scrub.rs +++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/scrub.rs @@ -3,7 +3,7 @@ use std::path::PathBuf; use subspace_farmer::single_disk_farm::SingleDiskFarm; use tracing::{error, info, info_span}; -pub(crate) fn scrub(disk_farms: &[PathBuf]) { +pub(crate) fn scrub(disk_farms: &[PathBuf], disable_farm_locking: bool) { disk_farms .into_par_iter() .enumerate() @@ -15,7 +15,7 @@ pub(crate) fn scrub(disk_farms: &[PathBuf]) { "Start scrubbing farm" ); - match SingleDiskFarm::scrub(directory) { + match SingleDiskFarm::scrub(directory, disable_farm_locking) { Ok(()) => { info!( path = %directory.display(), diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/main.rs b/crates/subspace-farmer/src/bin/subspace-farmer/main.rs index 588e078c5a..2953a387dd 100644 --- a/crates/subspace-farmer/src/bin/subspace-farmer/main.rs +++ b/crates/subspace-farmer/src/bin/subspace-farmer/main.rs @@ -42,6 +42,9 @@ enum Command { /// Example: /// /path/to/directory disk_farms: Vec, + /// Disable farm locking, for example if file system doesn't support it + #[arg(long)] + disable_farm_locking: bool, }, /// Wipes the farm Wipe { @@ -95,11 +98,14 @@ async fn main() -> anyhow::Result<()> { commands::info(disk_farms); } } - Command::Scrub { disk_farms } => { + Command::Scrub { + disk_farms, + disable_farm_locking, + } => { if disk_farms.is_empty() { info!("No farm was specified, so there is nothing to do"); } else { - commands::scrub(&disk_farms); + commands::scrub(&disk_farms, disable_farm_locking); } } Command::Wipe { 
disk_farms } => { diff --git a/crates/subspace-farmer/src/piece_cache.rs b/crates/subspace-farmer/src/farmer_cache.rs similarity index 97% rename from crates/subspace-farmer/src/piece_cache.rs rename to crates/subspace-farmer/src/farmer_cache.rs index 0dc5e11574..2c053c4045 100644 --- a/crates/subspace-farmer/src/piece_cache.rs +++ b/crates/subspace-farmer/src/farmer_cache.rs @@ -65,10 +65,10 @@ struct CacheWorkerState { last_segment_index: SegmentIndex, } -/// Cache worker used to drive the cache +/// Farmer cache worker used to drive the farmer cache backend #[derive(Debug)] -#[must_use = "Cache will not work unless its worker is running"] -pub struct CacheWorker +#[must_use = "Farmer cache will not work unless its worker is running"] +pub struct FarmerCacheWorker where NC: fmt::Debug, { @@ -79,11 +79,13 @@ where worker_receiver: Option>, } -impl CacheWorker +impl FarmerCacheWorker where NC: NodeClient, { - /// Run the cache worker with provided piece getter + /// Run the cache worker with provided piece getter. + /// + /// NOTE: Piece getter must not depend on farmer cache in order to avoid reference cycles! pub async fn run(mut self, piece_getter: PG) where PG: PieceGetter, @@ -754,23 +756,23 @@ where } } -/// Piece cache that aggregates caches of multiple disks +/// Farmer cache that aggregates different kinds of caches of multiple disks #[derive(Debug, Clone)] -pub struct PieceCache { +pub struct FarmerCache { peer_id: PeerId, /// Individual disk caches where pieces are stored caches: Arc>>, handlers: Arc, // We do not want to increase capacity unnecessarily on clone - worker_sender: mpsc::Sender, + worker_sender: Arc>, } -impl PieceCache { +impl FarmerCache { /// Create new piece cache instance and corresponding worker. /// /// NOTE: Returned future is async, but does blocking operations and should be running in /// dedicated thread. 
- pub fn new(node_client: NC, peer_id: PeerId) -> (Self, CacheWorker) + pub fn new(node_client: NC, peer_id: PeerId) -> (Self, FarmerCacheWorker) where NC: NodeClient, { @@ -782,9 +784,9 @@ impl PieceCache { peer_id, caches: Arc::clone(&caches), handlers: Arc::clone(&handlers), - worker_sender, + worker_sender: Arc::new(worker_sender), }; - let worker = CacheWorker { + let worker = FarmerCacheWorker { peer_id, node_client, caches, @@ -797,11 +799,10 @@ impl PieceCache { /// Get piece from cache pub async fn get_piece(&self, key: RecordKey) -> Option { - let caches = Arc::clone(&self.caches); - let maybe_piece_fut = tokio::task::spawn_blocking({ let key = key.clone(); - let worker_sender = self.worker_sender.clone(); + let caches = Arc::clone(&self.caches); + let worker_sender = Arc::clone(&self.worker_sender); move || { for (disk_farm_index, cache) in caches.read().iter().enumerate() { @@ -872,7 +873,7 @@ impl PieceCache { } } -impl LocalRecordProvider for PieceCache { +impl LocalRecordProvider for FarmerCache { fn record(&self, key: &RecordKey) -> Option { // It is okay to take read lock here, writes locks are very infrequent and very short for cache in self.caches.read().iter() { diff --git a/crates/subspace-farmer/src/piece_cache/tests.rs b/crates/subspace-farmer/src/farmer_cache/tests.rs similarity index 93% rename from crates/subspace-farmer/src/piece_cache/tests.rs rename to crates/subspace-farmer/src/farmer_cache/tests.rs index 8166a8a15f..9c44f91dbf 100644 --- a/crates/subspace-farmer/src/piece_cache/tests.rs +++ b/crates/subspace-farmer/src/farmer_cache/tests.rs @@ -1,5 +1,5 @@ +use crate::farmer_cache::FarmerCache; use crate::node_client::Error; -use crate::piece_cache::PieceCache; use crate::single_disk_farm::piece_cache::DiskPieceCache; use crate::NodeClient; use futures::channel::{mpsc, oneshot}; @@ -185,12 +185,13 @@ async fn basic() { let path2 = tempdir().unwrap(); { - let (piece_cache, piece_cache_worker) = - PieceCache::new(node_client.clone(), 
public_key.to_peer_id()); + let (farmer_cache, farmer_cache_worker) = + FarmerCache::new(node_client.clone(), public_key.to_peer_id()); - let piece_cache_worker_exited = tokio::spawn(piece_cache_worker.run(piece_getter.clone())); + let farmer_cache_worker_exited = + tokio::spawn(farmer_cache_worker.run(piece_getter.clone())); - let initialized_fut = piece_cache + let initialized_fut = farmer_cache .replace_backing_caches(vec![ DiskPieceCache::open(path1.as_ref(), 1).unwrap(), DiskPieceCache::open(path2.as_ref(), 1).unwrap(), @@ -208,14 +209,14 @@ async fn basic() { assert_eq!(requested_pieces, expected_pieces); for piece_index in requested_pieces { - piece_cache + farmer_cache .get_piece(RecordKey::from(piece_index.to_multihash())) .await .unwrap(); } // Other piece indices are not requested or cached - assert!(piece_cache + assert!(farmer_cache .get_piece(RecordKey::from(PieceIndex::from(10).to_multihash())) .await .is_none()); @@ -275,7 +276,7 @@ async fn basic() { let stored_pieces = vec![PieceIndex::from(196), PieceIndex::from(276)]; for piece_index in &stored_pieces { - piece_cache + farmer_cache .get_piece(RecordKey::from(piece_index.to_multihash())) .await .unwrap(); @@ -284,7 +285,7 @@ async fn basic() { for piece_index in requested_pieces { if !stored_pieces.contains(&piece_index) { // Other piece indices are not stored anymore - assert!(piece_cache + assert!(farmer_cache .get_piece(RecordKey::from(PieceIndex::from(10).to_multihash())) .await .is_none()); @@ -341,7 +342,7 @@ async fn basic() { let stored_pieces = vec![PieceIndex::from(823), PieceIndex::from(859)]; for piece_index in &stored_pieces { - piece_cache + farmer_cache .get_piece(RecordKey::from(piece_index.to_multihash())) .await .unwrap(); @@ -350,7 +351,7 @@ async fn basic() { for piece_index in requested_pieces { if !stored_pieces.contains(&piece_index) { // Other piece indices are not stored anymore - assert!(piece_cache + assert!(farmer_cache 
.get_piece(RecordKey::from(PieceIndex::from(10).to_multihash())) .await .is_none()); @@ -358,28 +359,28 @@ async fn basic() { } } - drop(piece_cache); + drop(farmer_cache); - piece_cache_worker_exited.await.unwrap(); + farmer_cache_worker_exited.await.unwrap(); } { // Clear requested pieces pieces.lock().clear(); - let (piece_cache, piece_cache_worker) = - PieceCache::new(node_client.clone(), public_key.to_peer_id()); + let (farmer_cache, farmer_cache_worker) = + FarmerCache::new(node_client.clone(), public_key.to_peer_id()); - let piece_cache_worker_exited = tokio::spawn(piece_cache_worker.run(piece_getter)); + let farmer_cache_worker_exited = tokio::spawn(farmer_cache_worker.run(piece_getter)); // Reopen with the same backing caches - let initialized_fut = piece_cache + let initialized_fut = farmer_cache .replace_backing_caches(vec![ DiskPieceCache::open(path1.as_ref(), 1).unwrap(), DiskPieceCache::open(path2.as_ref(), 1).unwrap(), ]) .await; - drop(piece_cache); + drop(farmer_cache); // Wait for piece cache to be initialized initialized_fut.await.unwrap(); @@ -398,6 +399,6 @@ async fn basic() { // Make worker exit archived_segment_headers_sender.close().await.unwrap(); - piece_cache_worker_exited.await.unwrap(); + farmer_cache_worker_exited.await.unwrap(); } } diff --git a/crates/subspace-farmer/src/lib.rs b/crates/subspace-farmer/src/lib.rs index bd33951aaf..c7a222b9ef 100644 --- a/crates/subspace-farmer/src/lib.rs +++ b/crates/subspace-farmer/src/lib.rs @@ -36,9 +36,9 @@ //! are `target ± ½ * solution range` (while also handing overflow/underflow) when interpreted as //! 64-bit unsigned integers. 
+pub mod farmer_cache; pub(crate) mod identity; pub mod node_client; -pub mod piece_cache; pub mod reward_signing; pub mod single_disk_farm; pub mod thread_pool_manager; diff --git a/crates/subspace-farmer/src/single_disk_farm.rs b/crates/subspace-farmer/src/single_disk_farm.rs index 06d746b605..4a1895aa64 100644 --- a/crates/subspace-farmer/src/single_disk_farm.rs +++ b/crates/subspace-farmer/src/single_disk_farm.rs @@ -38,7 +38,7 @@ use std::error::Error; use std::fs::{File, OpenOptions}; use std::future::Future; use std::io::{Seek, SeekFrom}; -use std::num::NonZeroU8; +use std::num::{NonZeroU8, NonZeroUsize}; use std::path::{Path, PathBuf}; use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -277,6 +277,8 @@ pub struct SingleDiskFarmOptions { /// Semaphore for part of the plotting when farmer downloads new sector, allows to limit memory /// usage of the plotting process, permit will be held until the end of the plotting process pub downloading_semaphore: Arc, + /// Defines how many record farmer will encode in a single sector concurrently + pub record_encoding_concurrency: NonZeroUsize, /// Whether to farm during initial plotting pub farm_during_initial_plotting: bool, /// Thread pool size used for farming (mostly for blocking I/O, but also for some @@ -287,6 +289,8 @@ pub struct SingleDiskFarmOptions { /// Notification for plotter to start, can be used to delay plotting until some initialization /// has happened externally pub plotting_delay: Option>, + /// Disable farm locking, for example if file system doesn't support it + pub disable_farm_locking: bool, } /// Errors happening when trying to create/open single disk farm @@ -567,7 +571,7 @@ pub struct SingleDiskFarm { start_sender: Option>, /// Sender that will be used to signal to background threads that they must stop stop_sender: Option>, - _single_disk_farm_info_lock: SingleDiskFarmInfoLock, + _single_disk_farm_info_lock: Option, } impl Drop for SingleDiskFarm { @@ -609,10 +613,12 @@ 
impl SingleDiskFarm { erasure_coding, cache_percentage, downloading_semaphore, + record_encoding_concurrency, farming_thread_pool_size, plotting_thread_pool_manager, plotting_delay, farm_during_initial_plotting, + disable_farm_locking, } = options; fs::create_dir_all(&directory)?; @@ -691,8 +697,14 @@ impl SingleDiskFarm { } }; - let single_disk_farm_info_lock = SingleDiskFarmInfo::try_lock(&directory) - .map_err(SingleDiskFarmError::LikelyAlreadyInUse)?; + let single_disk_farm_info_lock = if disable_farm_locking { + None + } else { + Some( + SingleDiskFarmInfo::try_lock(&directory) + .map_err(SingleDiskFarmError::LikelyAlreadyInUse)?, + ) + }; let pieces_in_sector = single_disk_farm_info.pieces_in_sector(); let sector_size = sector_size(pieces_in_sector); @@ -934,8 +946,9 @@ impl SingleDiskFarm { modifying_sector_index, sectors_to_plot_receiver, downloading_semaphore, + record_encoding_concurrency, plotting_thread_pool_manager, - stop_receiver: &mut stop_receiver.resubscribe(), + stop_receiver: stop_receiver.resubscribe(), }; let plotting_fut = async { @@ -1275,8 +1288,13 @@ impl SingleDiskFarm { } /// Number of sectors successfully plotted so far - pub async fn plotted_sectors_count(&self) -> usize { - self.sectors_metadata.read().await.len() + pub async fn plotted_sectors_count(&self) -> SectorIndex { + self.sectors_metadata + .read() + .await + .len() + .try_into() + .expect("Number of sectors never exceeds `SectorIndex` type; qed") } /// Read information about sectors plotted so far @@ -1409,7 +1427,10 @@ impl SingleDiskFarm { /// Check the farm for corruption and repair errors (caused by disk errors or something else), /// returns an error when irrecoverable errors occur. 
- pub fn scrub(directory: &Path) -> Result<(), SingleDiskFarmScrubError> { + pub fn scrub( + directory: &Path, + disable_farm_locking: bool, + ) -> Result<(), SingleDiskFarmScrubError> { let span = Span::current(); let info = { @@ -1427,8 +1448,14 @@ impl SingleDiskFarm { } }; - let _single_disk_farm_info_lock = SingleDiskFarmInfo::try_lock(directory) - .map_err(SingleDiskFarmScrubError::LikelyAlreadyInUse)?; + let _single_disk_farm_info_lock = if disable_farm_locking { + None + } else { + Some( + SingleDiskFarmInfo::try_lock(directory) + .map_err(SingleDiskFarmScrubError::LikelyAlreadyInUse)?, + ) + }; let identity = { let file = directory.join(Identity::FILE_NAME); diff --git a/crates/subspace-farmer/src/single_disk_farm/farming.rs b/crates/subspace-farmer/src/single_disk_farm/farming.rs index 36c48762b2..3b39150ef7 100644 --- a/crates/subspace-farmer/src/single_disk_farm/farming.rs +++ b/crates/subspace-farmer/src/single_disk_farm/farming.rs @@ -105,6 +105,9 @@ pub enum FarmingError { /// Lower-level error error: node_client::Error, }, + /// Slot info notification stream ended + #[error("Slot info notification stream ended")] + SlotNotificationStreamEnded, /// Low-level auditing error #[error("Low-level auditing error: {0}")] LowLevelAuditing(#[from] AuditingError), @@ -150,6 +153,7 @@ impl FarmingError { FarmingError::Io(_) => "Io", FarmingError::FailedToCreateThreadPool(_) => "FailedToCreateThreadPool", FarmingError::Decoded(_) => "Decoded", + FarmingError::SlotNotificationStreamEnded => "SlotNotificationStreamEnded", } } @@ -163,6 +167,7 @@ impl FarmingError { FarmingError::Io(_) => true, FarmingError::FailedToCreateThreadPool(_) => true, FarmingError::Decoded(error) => error.is_fatal, + FarmingError::SlotNotificationStreamEnded => true, } } } @@ -193,7 +198,7 @@ where } } - Ok(()) + Err(FarmingError::SlotNotificationStreamEnded) } /// Plot audit options diff --git a/crates/subspace-farmer/src/single_disk_farm/piece_cache.rs 
b/crates/subspace-farmer/src/single_disk_farm/piece_cache.rs index 45d763f770..408855260c 100644 --- a/crates/subspace-farmer/src/single_disk_farm/piece_cache.rs +++ b/crates/subspace-farmer/src/single_disk_farm/piece_cache.rs @@ -48,7 +48,8 @@ struct Inner { num_elements: usize, } -/// Piece cache stored on one disk +/// Dedicated piece cache stored on one disk, is used both to accelerate DSN queries and to plot +/// faster #[derive(Debug, Clone)] pub struct DiskPieceCache { inner: Arc, @@ -57,17 +58,7 @@ pub struct DiskPieceCache { impl DiskPieceCache { pub(super) const FILE_NAME: &'static str = "piece_cache.bin"; - #[cfg(not(test))] - pub(super) fn open(directory: &Path, capacity: usize) -> Result { - Self::open_internal(directory, capacity) - } - - #[cfg(test)] - pub(crate) fn open(directory: &Path, capacity: usize) -> Result { - Self::open_internal(directory, capacity) - } - - pub(super) fn open_internal( + pub(in super::super) fn open( directory: &Path, capacity: usize, ) -> Result { diff --git a/crates/subspace-farmer/src/single_disk_farm/plotting.rs b/crates/subspace-farmer/src/single_disk_farm/plotting.rs index b86bd47fb3..e00c6705cd 100644 --- a/crates/subspace-farmer/src/single_disk_farm/plotting.rs +++ b/crates/subspace-farmer/src/single_disk_farm/plotting.rs @@ -15,8 +15,7 @@ use std::fs::File; use std::io; use std::num::{NonZeroU16, NonZeroUsize}; use std::ops::Range; -use std::pin::pin; -use std::sync::atomic::Ordering; +use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::time::{Duration, Instant}; use subspace_core_primitives::crypto::kzg::Kzg; @@ -34,7 +33,6 @@ use subspace_farmer_components::sector::SectorMetadataChecksummed; use subspace_farmer_components::{plotting, PieceGetter, PieceGetterRetryPolicy}; use subspace_proof_of_space::Table; use thiserror::Error; -use tokio::runtime::Handle; use tokio::sync::{broadcast, OwnedSemaphorePermit, Semaphore}; use tokio::task::yield_now; use tracing::{debug, info, trace, warn, 
Instrument}; @@ -131,9 +129,6 @@ pub enum PlottingError { /// Lower-level error error: node_client::Error, }, - /// Farm is shutting down - #[error("Farm is shutting down")] - FarmIsShuttingDown, /// Low-level plotting error #[error("Low-level plotting error: {0}")] LowLevel(#[from] plotting::PlottingError), @@ -164,8 +159,9 @@ pub(super) struct PlottingOptions<'a, NC, PG> { /// Semaphore for part of the plotting when farmer downloads new sector, allows to limit memory /// usage of the plotting process, permit will be held until the end of the plotting process pub(crate) downloading_semaphore: Arc, + pub(crate) record_encoding_concurrency: NonZeroUsize, pub(super) plotting_thread_pool_manager: PlottingThreadPoolManager, - pub(super) stop_receiver: &'a mut broadcast::Receiver<()>, + pub(super) stop_receiver: broadcast::Receiver<()>, } /// Starts plotting process. @@ -197,11 +193,30 @@ where modifying_sector_index, mut sectors_to_plot_receiver, downloading_semaphore, + record_encoding_concurrency, plotting_thread_pool_manager, - stop_receiver, + mut stop_receiver, } = plotting_options; - let mut table_generator = PosTable::generator(); + let abort_early = Arc::new(AtomicBool::new(false)); + + let _abort_early_task = AsyncJoinOnDrop::new( + tokio::spawn({ + let abort_early = Arc::clone(&abort_early); + + async move { + // Error doesn't matter here + let _ = stop_receiver.recv().await; + + abort_early.store(true, Ordering::Release); + } + }), + true, + ); + + let mut table_generators = (0..record_encoding_concurrency.get()) + .map(|_| PosTable::generator()) + .collect::>(); let mut maybe_next_downloaded_sector_fut = None::< AsyncJoinOnDrop>, @@ -365,7 +380,7 @@ where let sector_metadata; let plotted_sector; - (sector, sector_metadata, table_generator, plotted_sector) = { + (sector, sector_metadata, table_generators, plotted_sector) = { let plotting_fn = || { tokio::task::block_in_place(|| { let mut sector = Vec::new(); @@ -378,37 +393,25 @@ where let start = 
Instant::now(); - let plotted_sector = { - let plot_sector_fut = pin!(encode_sector::( - downloaded_sector, - EncodeSectorOptions { - sector_index, - erasure_coding, - pieces_in_sector, - sector_output: &mut sector, - sector_metadata_output: &mut sector_metadata, - table_generator: &mut table_generator, - }, - )); - - Handle::current().block_on(async { - select! { - plotting_result = plot_sector_fut.fuse() => { - plotting_result.map_err(PlottingError::from) - } - _ = stop_receiver.recv().fuse() => { - Err(PlottingError::FarmIsShuttingDown) - } - } - })? - }; + let plotted_sector = encode_sector::( + downloaded_sector, + EncodeSectorOptions { + sector_index, + erasure_coding, + pieces_in_sector, + sector_output: &mut sector, + sector_metadata_output: &mut sector_metadata, + table_generators: &mut table_generators, + abort_early: &abort_early, + }, + )?; handlers.sector_update.call_simple(&( sector_index, SectorUpdate::Plotting(SectorPlottingDetails::Encoded(start.elapsed())), )); - Ok((sector, sector_metadata, table_generator, plotted_sector)) + Ok((sector, sector_metadata, table_generators, plotted_sector)) }) }; @@ -424,7 +427,10 @@ where let plotting_result = thread_pool.install(plotting_fn); - if matches!(plotting_result, Err(PlottingError::FarmIsShuttingDown)) { + if matches!( + plotting_result, + Err(PlottingError::LowLevel(plotting::PlottingError::AbortEarly)) + ) { return Ok(()); } diff --git a/crates/subspace-farmer/src/utils.rs b/crates/subspace-farmer/src/utils.rs index bebcef560a..a28671afd6 100644 --- a/crates/subspace-farmer/src/utils.rs +++ b/crates/subspace-farmer/src/utils.rs @@ -1,6 +1,6 @@ pub mod farmer_piece_getter; pub mod piece_validator; -pub mod readers_and_pieces; +pub mod plotted_pieces; pub mod ss58; #[cfg(test)] mod tests; diff --git a/crates/subspace-farmer/src/utils/farmer_piece_getter.rs b/crates/subspace-farmer/src/utils/farmer_piece_getter.rs index 5944613408..552e306504 100644 --- 
a/crates/subspace-farmer/src/utils/farmer_piece_getter.rs +++ b/crates/subspace-farmer/src/utils/farmer_piece_getter.rs @@ -1,10 +1,10 @@ -use crate::piece_cache::PieceCache; -use crate::utils::readers_and_pieces::ReadersAndPieces; +use crate::farmer_cache::FarmerCache; +use crate::utils::plotted_pieces::PlottedPieces; use crate::NodeClient; use async_trait::async_trait; use parking_lot::Mutex; use std::error::Error; -use std::sync::Arc; +use std::sync::{Arc, Weak}; use subspace_core_primitives::{Piece, PieceIndex}; use subspace_farmer_components::{PieceGetter, PieceGetterRetryPolicy}; use subspace_networking::libp2p::kad::RecordKey; @@ -14,25 +14,47 @@ use tracing::{debug, error, trace}; const MAX_RANDOM_WALK_ROUNDS: usize = 15; -pub struct FarmerPieceGetter { +struct Inner { piece_provider: PieceProvider, - piece_cache: PieceCache, + farmer_cache: FarmerCache, node_client: NC, - readers_and_pieces: Arc>>, + plotted_pieces: Arc>>, +} + +pub struct FarmerPieceGetter { + inner: Arc>, +} + +impl Clone for FarmerPieceGetter { + fn clone(&self) -> Self { + Self { + inner: Arc::clone(&self.inner), + } + } } impl FarmerPieceGetter { pub fn new( piece_provider: PieceProvider, - piece_cache: PieceCache, + farmer_cache: FarmerCache, node_client: NC, - readers_and_pieces: Arc>>, + plotted_pieces: Arc>>, ) -> Self { Self { - piece_provider, - piece_cache, - node_client, - readers_and_pieces, + inner: Arc::new(Inner { + piece_provider, + farmer_cache, + node_client, + plotted_pieces, + }), + } + } + + /// Downgrade to [`WeakFarmerPieceGetter`] in order to break reference cycles with internally + /// used [`Arc`] + pub fn downgrade(&self) -> WeakFarmerPieceGetter { + WeakFarmerPieceGetter { + inner: Arc::downgrade(&self.inner), } } @@ -57,15 +79,17 @@ where ) -> Result, Box> { let key = RecordKey::from(piece_index.to_multihash()); - trace!(%piece_index, "Getting piece from local cache"); - if let Some(piece) = self.piece_cache.get_piece(key).await { - trace!(%piece_index, "Got 
piece from local cache successfully"); + let inner = &self.inner; + + trace!(%piece_index, "Getting piece from farmer cache"); + if let Some(piece) = inner.farmer_cache.get_piece(key).await { + trace!(%piece_index, "Got piece from farmer cache successfully"); return Ok(Some(piece)); } // L2 piece acquisition trace!(%piece_index, "Getting piece from DSN L2 cache"); - let maybe_piece = self + let maybe_piece = inner .piece_provider .get_piece_from_dsn_cache(piece_index, Self::convert_retry_policy(retry_policy)) .await?; @@ -77,7 +101,7 @@ where // Try node's RPC before reaching to L1 (archival storage on DSN) trace!(%piece_index, "Getting piece from node"); - match self.node_client.piece(piece_index).await { + match inner.node_client.piece(piece_index).await { Ok(Some(piece)) => { trace!(%piece_index, "Got piece from node successfully"); return Ok(Some(piece)); @@ -95,11 +119,11 @@ where } trace!(%piece_index, "Getting piece from local plot"); - let maybe_read_piece_fut = self - .readers_and_pieces + let maybe_read_piece_fut = inner + .plotted_pieces .lock() .as_ref() - .and_then(|readers_and_pieces| readers_and_pieces.read_piece(&piece_index)); + .and_then(|plotted_pieces| plotted_pieces.read_piece(&piece_index)); if let Some(read_piece_fut) = maybe_read_piece_fut { if let Some(piece) = read_piece_fut.await { @@ -111,7 +135,7 @@ where // L1 piece acquisition trace!(%piece_index, "Getting piece from DSN L1."); - let archival_storage_search_result = self + let archival_storage_search_result = inner .piece_provider .get_piece_from_archival_storage(piece_index, MAX_RANDOM_WALK_ROUNDS) .await; @@ -129,3 +153,46 @@ where Ok(None) } } + +/// Weak farmer piece getter, can be upgraded to [`FarmerPieceGetter`] +#[derive(Debug)] +pub struct WeakFarmerPieceGetter { + inner: Weak>, +} + +impl Clone for WeakFarmerPieceGetter { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + +#[async_trait] +impl PieceGetter for WeakFarmerPieceGetter +where + PV: 
PieceValidator + Send + 'static, + NC: NodeClient, +{ + async fn get_piece( + &self, + piece_index: PieceIndex, + retry_policy: PieceGetterRetryPolicy, + ) -> Result, Box> { + let Some(piece_getter) = self.upgrade() else { + debug!("Farmer piece getter upgrade didn't succeed"); + return Ok(None); + }; + + piece_getter.get_piece(piece_index, retry_policy).await + } +} + +impl WeakFarmerPieceGetter { + /// Try to upgrade to [`FarmerPieceGetter`] if there is at least one other instance of it alive + pub fn upgrade(&self) -> Option> { + Some(FarmerPieceGetter { + inner: self.inner.upgrade()?, + }) + } +} diff --git a/crates/subspace-farmer/src/utils/readers_and_pieces.rs b/crates/subspace-farmer/src/utils/plotted_pieces.rs similarity index 92% rename from crates/subspace-farmer/src/utils/readers_and_pieces.rs rename to crates/subspace-farmer/src/utils/plotted_pieces.rs index 54f9edf626..809aebfed4 100644 --- a/crates/subspace-farmer/src/utils/readers_and_pieces.rs +++ b/crates/subspace-farmer/src/utils/plotted_pieces.rs @@ -13,14 +13,15 @@ struct PieceDetails { piece_offset: PieceOffset, } -/// Wrapper data structure for pieces plotted under multiple plots and corresponding piece readers. +/// Wrapper data structure for pieces plotted under multiple plots. #[derive(Debug)] -pub struct ReadersAndPieces { +pub struct PlottedPieces { readers: Vec, pieces: HashMap>, } -impl ReadersAndPieces { +impl PlottedPieces { + /// Initialize with readers for each farm pub fn new(readers: Vec) -> Self { Self { readers, @@ -33,7 +34,7 @@ impl ReadersAndPieces { self.pieces.contains_key(piece_index) } - /// Read piece from one of the associated readers. + /// Read plotted piece from one of the farms. /// /// If piece doesn't exist `None` is returned, if by the time future is polled piece is no /// longer in the plot, future will resolve with `None`.
@@ -69,6 +70,7 @@ impl ReadersAndPieces { }) } + /// Add new sector to collect plotted pieces pub fn add_sector(&mut self, disk_farm_index: u8, plotted_sector: &PlottedSector) { for (piece_offset, &piece_index) in (PieceOffset::ZERO..).zip(plotted_sector.piece_indexes.iter()) @@ -90,6 +92,7 @@ impl ReadersAndPieces { } } + /// Add old sector from plotted pieces (happens on replotting) pub fn delete_sector(&mut self, disk_farm_index: u8, plotted_sector: &PlottedSector) { for (piece_offset, &piece_index) in (PieceOffset::ZERO..).zip(plotted_sector.piece_indexes.iter()) @@ -121,6 +124,7 @@ impl ReadersAndPieces { } } + /// Iterator over all unique piece indices plotted pub fn piece_indices(&self) -> impl Iterator { self.pieces.keys() } diff --git a/crates/subspace-malicious-operator/src/chain_spec.rs b/crates/subspace-malicious-operator/src/chain_spec.rs index 45ff978c72..58822de0e4 100644 --- a/crates/subspace-malicious-operator/src/chain_spec.rs +++ b/crates/subspace-malicious-operator/src/chain_spec.rs @@ -1,3 +1,4 @@ +use domain_runtime_primitives::{AccountId20Converter, MultiAccountId}; use evm_domain_runtime::{AccountId as AccountId20, EVMChainIdConfig, EVMConfig, Precompiles}; use hex_literal::hex; use parity_scale_codec::Encode; @@ -7,7 +8,7 @@ use sp_core::crypto::AccountId32; use sp_core::{sr25519, Pair, Public}; use sp_domains::storage::RawGenesis; use sp_domains::{OperatorAllowList, OperatorPublicKey, RuntimeType}; -use sp_runtime::traits::IdentifyAccount; +use sp_runtime::traits::{Convert, IdentifyAccount}; use sp_runtime::{BuildStorage, MultiSigner, Percent}; use std::marker::PhantomData; use std::num::NonZeroU32; @@ -17,8 +18,8 @@ use subspace_runtime::{ }; use subspace_runtime_primitives::{AccountId, Balance, BlockNumber, SSC}; -pub fn domain_dev_config() -> GenericChainSpec { - let endowed_accounts = [ +fn endowed_accounts() -> Vec<(MultiAccountId, Balance)> { + [ // Alith key AccountId20::from(hex!("f24FF3a9CF04c71Dbc94D0b566f7A27B94566cac")), // 
Baltathar key @@ -27,8 +28,15 @@ pub fn domain_dev_config() -> GenericChainSpec GenericChainSpec { + // Alith is sudo account + let sudo_account = AccountId20::from(hex!("f24FF3a9CF04c71Dbc94D0b566f7A27B94566cac")); // TODO: Migrate once https://github.com/paritytech/polkadot-sdk/issues/2963 is un-broken #[allow(deprecated)] @@ -50,14 +58,7 @@ pub fn domain_dev_config() -> GenericChainSpec, operator_signing_key: OperatorPublicKey, raw_genesis_storage: Vec, + initial_balances: Vec<(MultiAccountId, Balance)>, } pub fn dev_config() -> Result, String> { @@ -198,6 +200,7 @@ pub fn dev_config() -> Result("Alice"), raw_genesis_storage: raw_genesis_storage.clone(), + initial_balances: endowed_accounts(), }, ) }, @@ -277,6 +280,7 @@ fn subspace_genesis_config( signing_key: genesis_domain_params.operator_signing_key, nomination_tax: Percent::from_percent(5), minimum_nominator_stake: 100 * SSC, + initial_balances: genesis_domain_params.initial_balances, }), }, } diff --git a/crates/subspace-malicious-operator/src/malicious_bundle_producer.rs b/crates/subspace-malicious-operator/src/malicious_bundle_producer.rs index bc197707a2..8722012be5 100644 --- a/crates/subspace-malicious-operator/src/malicious_bundle_producer.rs +++ b/crates/subspace-malicious-operator/src/malicious_bundle_producer.rs @@ -3,7 +3,6 @@ use domain_client_operator::domain_bundle_producer::DomainBundleProducer; use domain_client_operator::domain_bundle_proposer::DomainBundleProposer; use domain_client_operator::{OpaqueBundleFor, OperatorSlotInfo}; use domain_runtime_primitives::opaque::Block as DomainBlock; -use domain_runtime_primitives::DomainCoreApi; use frame_system_rpc_runtime_api::AccountNonceApi; use futures::{Stream, StreamExt, TryFutureExt}; use pallet_domains::OperatorConfig; @@ -20,6 +19,7 @@ use sp_consensus_slots::Slot; use sp_consensus_subspace::FarmerPublicKey; use sp_core::crypto::UncheckedFrom; use sp_core::Get; +use sp_domains::core_api::DomainCoreApi; use 
sp_domains::{BundleProducerElectionApi, DomainId, DomainsApi, OperatorId, OperatorPublicKey}; use sp_keyring::Sr25519Keyring; use sp_keystore::KeystorePtr; diff --git a/crates/subspace-malicious-operator/src/malicious_bundle_tamper.rs b/crates/subspace-malicious-operator/src/malicious_bundle_tamper.rs index 2d2690b65b..0ba3f11ffb 100644 --- a/crates/subspace-malicious-operator/src/malicious_bundle_tamper.rs +++ b/crates/subspace-malicious-operator/src/malicious_bundle_tamper.rs @@ -1,12 +1,13 @@ use domain_client_operator::{ExecutionReceiptFor, OpaqueBundleFor}; -use domain_runtime_primitives::{BlockFees, DomainCoreApi}; use parity_scale_codec::{Decode, Encode}; use sc_client_api::HeaderBackend; use sp_api::ProvideRuntimeApi; use sp_domain_digests::AsPredigest; +use sp_domains::core_api::DomainCoreApi; use sp_domains::merkle_tree::MerkleTree; use sp_domains::{ - BundleValidity, HeaderHashingFor, InvalidBundleType, OperatorPublicKey, OperatorSignature, + BlockFees, BundleValidity, HeaderHashingFor, InvalidBundleType, OperatorPublicKey, + OperatorSignature, }; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, NumberFor, One, Zero}; @@ -124,7 +125,8 @@ where match bad_receipt_type { BadReceiptType::BlockFees => { - receipt.block_fees = BlockFees::new(random_seed.into(), random_seed.into()); + receipt.block_fees = + BlockFees::new(random_seed.into(), random_seed.into(), random_seed.into()); } // TODO: modify the length of `execution_trace` once the honest operator can handle BadReceiptType::ExecutionTrace => { diff --git a/crates/subspace-node/src/chain_spec.rs b/crates/subspace-node/src/chain_spec.rs index d33d8186ff..274ebc4d0c 100644 --- a/crates/subspace-node/src/chain_spec.rs +++ b/crates/subspace-node/src/chain_spec.rs @@ -20,6 +20,7 @@ use crate::chain_spec_utils::{ chain_spec_properties, get_account_id_from_seed, get_public_key_from_seed, }; use crate::domain::evm_chain_spec::{self, SpecId}; +use 
domain_runtime_primitives::MultiAccountId; use hex_literal::hex; use parity_scale_codec::Encode; use sc_chain_spec::GenericChainSpec; @@ -107,6 +108,7 @@ struct GenesisDomainParams { domain_name: String, operator_allow_list: OperatorAllowList, operator_signing_key: OperatorPublicKey, + initial_balances: Vec<(MultiAccountId, Balance)>, } pub fn gemini_3h_compiled() -> Result, String> { @@ -189,6 +191,9 @@ pub fn gemini_3h_compiled() -> Result, St operator_signing_key: OperatorPublicKey::unchecked_from(hex!( "aa3b05b4d649666723e099cf3bafc2f2c04160ebe0e16ddc82f72d6ed97c4b6b" )), + initial_balances: evm_chain_spec::get_testnet_endowed_accounts_by_spec_id( + SpecId::Gemini, + ), }, ) }, @@ -298,6 +303,9 @@ pub fn devnet_config_compiled() -> Result operator_signing_key: OperatorPublicKey::unchecked_from(hex!( "aa3b05b4d649666723e099cf3bafc2f2c04160ebe0e16ddc82f72d6ed97c4b6b" )), + initial_balances: evm_chain_spec::get_testnet_endowed_accounts_by_spec_id( + SpecId::DevNet, + ), }, ) }, @@ -365,6 +373,9 @@ pub fn dev_config() -> Result, String> { domain_name: "evm-domain".to_owned(), operator_allow_list: OperatorAllowList::Anyone, operator_signing_key: get_public_key_from_seed::("Alice"), + initial_balances: evm_chain_spec::get_testnet_endowed_accounts_by_spec_id( + SpecId::Dev, + ), }, ) }, @@ -471,6 +482,7 @@ fn subspace_genesis_config( signing_key: genesis_domain_params.operator_signing_key, nomination_tax: Percent::from_percent(5), minimum_nominator_stake: 100 * SSC, + initial_balances: genesis_domain_params.initial_balances, }), }, } diff --git a/crates/subspace-node/src/domain/evm_chain_spec.rs b/crates/subspace-node/src/domain/evm_chain_spec.rs index cf79caeb21..c45ae4547e 100644 --- a/crates/subspace-node/src/domain/evm_chain_spec.rs +++ b/crates/subspace-node/src/domain/evm_chain_spec.rs @@ -17,6 +17,7 @@ //! EVM domain configurations. 
use crate::chain_spec_utils::chain_spec_properties; +use domain_runtime_primitives::{AccountId20Converter, MultiAccountId}; use evm_domain_runtime::{ AccountId, BalancesConfig, EVMChainIdConfig, EVMConfig, Precompiles, RuntimeGenesisConfig, SudoConfig, SystemConfig, WASM_BINARY, @@ -24,8 +25,8 @@ use evm_domain_runtime::{ use hex_literal::hex; use sc_chain_spec::GenericChainSpec; use sc_service::ChainType; -use std::str::FromStr; -use subspace_runtime_primitives::SSC; +use sp_runtime::traits::Convert; +use subspace_runtime_primitives::{Balance, SSC}; /// Development keys that will be injected automatically on polkadotjs apps fn get_dev_accounts() -> Vec { @@ -142,40 +143,26 @@ pub fn get_testnet_genesis_by_spec_id(spec_id: SpecId) -> RuntimeGenesisConfig { SpecId::Dev => { let accounts = get_dev_accounts(); testnet_genesis( - accounts.clone(), // Alith is Sudo Some(accounts[0]), ) } - SpecId::Gemini => { - let sudo_account = AccountId::from_str("f31e60022e290708c17d6997c34de6a30d09438f") - .expect("Invalid Sudo account"); - testnet_genesis( - vec![ - // Sudo account - sudo_account, - ], - Some(sudo_account), - ) - } - SpecId::DevNet => { - let sudo_account = AccountId::from_str("b66a91845249464309fad766fd0ece8144547736") - .expect("Invalid Sudo account"); - testnet_genesis( - vec![ - // Sudo account - sudo_account, - ], - Some(sudo_account), - ) - } + SpecId::Gemini => testnet_genesis(None), + SpecId::DevNet => testnet_genesis(None), } } -fn testnet_genesis( - endowed_accounts: Vec, - maybe_sudo_account: Option, -) -> RuntimeGenesisConfig { +pub fn get_testnet_endowed_accounts_by_spec_id(spec_id: SpecId) -> Vec<(MultiAccountId, Balance)> { + match spec_id { + SpecId::Dev => get_dev_accounts() + .into_iter() + .map(|acc| (AccountId20Converter::convert(acc), 1_000_000 * SSC)) + .collect(), + SpecId::DevNet | SpecId::Gemini => vec![], + } +} + +fn testnet_genesis(maybe_sudo_account: Option) -> RuntimeGenesisConfig { // This is the simplest bytecode to revert without 
returning any data. // We will pre-deploy it under all of our precompiles to ensure they can be called from // within contracts. @@ -187,14 +174,7 @@ fn testnet_genesis( sudo: SudoConfig { key: maybe_sudo_account, }, - balances: BalancesConfig { - // TODO: remove `endowed_accounts` once XDM is ready - balances: endowed_accounts - .iter() - .cloned() - .map(|k| (k, 1_000_000 * SSC)) - .collect(), - }, + balances: BalancesConfig::default(), // this is set to default and chain_id will be set into genesis during the domain // instantiation on Consensus runtime. evm_chain_id: EVMChainIdConfig::default(), diff --git a/crates/subspace-proof-of-space/benches/pos.rs b/crates/subspace-proof-of-space/benches/pos.rs index f9f00271f3..a47738afa0 100644 --- a/crates/subspace-proof-of-space/benches/pos.rs +++ b/crates/subspace-proof-of-space/benches/pos.rs @@ -40,11 +40,33 @@ fn pos_bench( #[cfg(feature = "parallel")] { let mut generator_instance = PosTable::generator(); - group.bench_function("table/parallel", |b| { + group.bench_function("table/parallel/1x", |b| { b.iter(|| { generator_instance.generate_parallel(black_box(&seed)); }); }); + + let mut generator_instances = [ + PosTable::generator(), + PosTable::generator(), + PosTable::generator(), + PosTable::generator(), + PosTable::generator(), + PosTable::generator(), + PosTable::generator(), + PosTable::generator(), + ]; + group.bench_function("table/parallel/8x", |b| { + b.iter(|| { + rayon::scope(|scope| { + for g in &mut generator_instances { + scope.spawn(|_scope| { + g.generate_parallel(black_box(&seed)); + }); + } + }); + }); + }); } let table = generator_instance.generate(&seed); diff --git a/crates/subspace-proof-of-space/src/chiapos/table.rs b/crates/subspace-proof-of-space/src/chiapos/table.rs index c26399de65..2881171aae 100644 --- a/crates/subspace-proof-of-space/src/chiapos/table.rs +++ b/crates/subspace-proof-of-space/src/chiapos/table.rs @@ -791,7 +791,7 @@ where let mut positions = 
Vec::with_capacity(t_n.len()); let mut metadatas = Vec::with_capacity(t_n.len()); - for (y, [left_position, right_position], metadata) in t_n { + for (y, [left_position, right_position], metadata) in t_n.drain(..) { ys.push(y); positions.push([left_position, right_position]); // Last table doesn't have metadata @@ -800,6 +800,11 @@ where } } + // Drop from a background thread, which typically helps with overall concurrency + rayon::spawn(move || { + drop(t_n); + }); + Self::Other { ys, positions, diff --git a/crates/subspace-runtime/src/lib.rs b/crates/subspace-runtime/src/lib.rs index 25ccaf0159..b2be9576ca 100644 --- a/crates/subspace-runtime/src/lib.rs +++ b/crates/subspace-runtime/src/lib.rs @@ -36,7 +36,7 @@ use core::mem; use core::num::NonZeroU64; use domain_runtime_primitives::opaque::Header as DomainHeader; use domain_runtime_primitives::{ - BlockNumber as DomainNumber, Hash as DomainHash, MultiAccountId, TryConvertBack, + AccountIdConverter, BlockNumber as DomainNumber, Hash as DomainHash, }; use frame_support::inherent::ProvideInherent; use frame_support::traits::{ @@ -69,8 +69,7 @@ use sp_messenger::messages::{BlockMessagesWithStorageKey, ChainId, CrossDomainMe use sp_messenger_host_functions::{get_storage_key, StorageKeyRequest}; use sp_mmr_primitives::{EncodableOpaqueLeaf, Proof}; use sp_runtime::traits::{ - AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Convert, Keccak256, - NumberFor, + AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Keccak256, NumberFor, }; use sp_runtime::transaction_validity::{TransactionSource, TransactionValidity}; use sp_runtime::{create_runtime_str, generic, AccountId32, ApplyExtrinsicResult, Perbill}; @@ -109,7 +108,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("subspace"), impl_name: create_runtime_str!("subspace"), authoring_version: 0, - spec_version: 0, + spec_version: 2, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 
0, @@ -549,23 +548,6 @@ parameter_types! { pub const TransporterEndpointId: EndpointId = 1; } -pub struct AccountIdConverter; - -impl Convert for AccountIdConverter { - fn convert(account_id: AccountId) -> MultiAccountId { - MultiAccountId::AccountId32(account_id.into()) - } -} - -impl TryConvertBack for AccountIdConverter { - fn try_convert_back(multi_account_id: MultiAccountId) -> Option { - match multi_account_id { - MultiAccountId::AccountId32(acc) => Some(AccountId::from(acc)), - _ => None, - } - } -} - impl pallet_transporter::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SelfChainId = SelfChainId; @@ -610,6 +592,8 @@ parameter_types! { pub const MaxNominators: u32 = 256; pub SudoId: AccountId = Sudo::key().expect("Sudo account must exist"); pub const DomainsPalletId: PalletId = PalletId(*b"domains_"); + pub const MaxInitialDomainAccounts: u32 = 10; + pub const MinInitialDomainAccountBalance: Balance = SSC; } // Minimum operator stake must be >= minimum nominator stake since operator is also a nominator. @@ -660,6 +644,9 @@ impl pallet_domains::Config for Runtime { type PalletId = DomainsPalletId; type StorageFee = TransactionFees; type BlockSlot = BlockSlot; + type DomainsTransfersTracker = Transporter; + type MaxInitialDomainAccounts = MaxInitialDomainAccounts; + type MinInitialDomainAccountBalance = MinInitialDomainAccountBalance; } parameter_types! 
{ diff --git a/crates/subspace-service/src/lib.rs b/crates/subspace-service/src/lib.rs index b49d995b4c..f3bf9c0289 100644 --- a/crates/subspace-service/src/lib.rs +++ b/crates/subspace-service/src/lib.rs @@ -880,6 +880,11 @@ where )) }); + if !config.base.network.force_synced { + // Start with DSN sync in this case + pause_sync.store(true, Ordering::Release); + } + let (observer, worker) = sync_from_dsn::create_observer_and_worker( segment_headers_store.clone(), Arc::clone(&network_service), diff --git a/crates/subspace-service/src/sync_from_dsn.rs b/crates/subspace-service/src/sync_from_dsn.rs index 2af30a0fee..9243ddafc3 100644 --- a/crates/subspace-service/src/sync_from_dsn.rs +++ b/crates/subspace-service/src/sync_from_dsn.rs @@ -249,10 +249,8 @@ where .saturating_sub(chain_constants.confirmation_depth_k().into()); let segment_header_downloader = SegmentHeaderDownloader::new(node); - // Node starts as offline, we'll wait for it to go online shrtly after - let mut initial_pause_sync = Some(pause_sync.swap(true, Ordering::AcqRel)); while let Some(reason) = notifications.next().await { - let prev_pause_sync = pause_sync.swap(true, Ordering::AcqRel); + pause_sync.store(true, Ordering::Release); info!(?reason, "Received notification to sync from DSN"); // TODO: Maybe handle failed block imports, additional helpful logging @@ -296,10 +294,7 @@ where } } - pause_sync.store( - initial_pause_sync.take().unwrap_or(prev_pause_sync), - Ordering::Release, - ); + pause_sync.store(false, Ordering::Release); while notifications.try_next().is_ok() { // Just drain extra messages if there are any diff --git a/domains/client/block-preprocessor/src/lib.rs b/domains/client/block-preprocessor/src/lib.rs index 0be63c46ac..bf229d3f79 100644 --- a/domains/client/block-preprocessor/src/lib.rs +++ b/domains/client/block-preprocessor/src/lib.rs @@ -17,11 +17,11 @@ pub mod stateless_runtime; use crate::inherents::is_runtime_upgraded; use codec::Encode; use 
domain_runtime_primitives::opaque::AccountId; -use domain_runtime_primitives::DomainCoreApi; use sc_client_api::BlockBackend; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_core::H256; +use sp_domains::core_api::DomainCoreApi; use sp_domains::extrinsics::deduplicate_and_shuffle_extrinsics; use sp_domains::{ DomainId, DomainsApi, ExecutionReceipt, ExtrinsicDigest, HeaderHashingFor, InboxedBundle, diff --git a/domains/client/block-preprocessor/src/stateless_runtime.rs b/domains/client/block-preprocessor/src/stateless_runtime.rs index 8230e03357..81bed75677 100644 --- a/domains/client/block-preprocessor/src/stateless_runtime.rs +++ b/domains/client/block-preprocessor/src/stateless_runtime.rs @@ -1,12 +1,11 @@ use codec::{Codec, Encode}; use domain_runtime_primitives::opaque::AccountId; -use domain_runtime_primitives::{ - Balance, CheckExtrinsicsValidityError, DecodeExtrinsicError, DomainCoreApi, -}; +use domain_runtime_primitives::{Balance, CheckExtrinsicsValidityError, DecodeExtrinsicError}; use sc_executor::RuntimeVersionOf; use sp_api::{ApiError, Core}; use sp_core::traits::{CallContext, CodeExecutor, FetchRuntimeCode, RuntimeCode}; use sp_core::Hasher; +use sp_domains::core_api::DomainCoreApi; use sp_messenger::messages::MessageId; use sp_messenger::MessengerApi; use sp_runtime::traits::{Block as BlockT, NumberFor}; @@ -248,4 +247,8 @@ where block_hash, ) } + + pub fn transfers_storage_key(&self) -> Result, ApiError> { + >::transfers_storage_key(self, Default::default()) + } } diff --git a/domains/client/domain-operator/src/bundle_processor.rs b/domains/client/domain-operator/src/bundle_processor.rs index f9cb4d6c65..3c36eb5d82 100644 --- a/domains/client/domain-operator/src/bundle_processor.rs +++ b/domains/client/domain-operator/src/bundle_processor.rs @@ -3,7 +3,6 @@ use crate::domain_block_processor::{ }; use crate::ExecutionReceiptFor; use domain_block_preprocessor::DomainBlockPreprocessor; -use 
domain_runtime_primitives::DomainCoreApi; use sc_client_api::{AuxStore, BlockBackend, Finalizer, ProofProvider}; use sc_consensus::{BlockImportParams, ForkChoiceStrategy, StateAction}; use sp_api::ProvideRuntimeApi; @@ -12,6 +11,7 @@ use sp_consensus::BlockOrigin; use sp_core::traits::CodeExecutor; use sp_core::H256; use sp_domain_digests::AsPredigest; +use sp_domains::core_api::DomainCoreApi; use sp_domains::{DomainId, DomainsApi, ReceiptValidity}; use sp_domains_fraud_proof::FraudProofApi; use sp_messenger::MessengerApi; diff --git a/domains/client/domain-operator/src/domain_block_processor.rs b/domains/client/domain-operator/src/domain_block_processor.rs index ea222854e6..c03a273128 100644 --- a/domains/client/domain-operator/src/domain_block_processor.rs +++ b/domains/client/domain-operator/src/domain_block_processor.rs @@ -6,7 +6,6 @@ use codec::{Decode, Encode}; use domain_block_builder::{BlockBuilder, BuiltBlock, RecordProof}; use domain_block_preprocessor::inherents::get_inherent_data; use domain_block_preprocessor::PreprocessResult; -use domain_runtime_primitives::DomainCoreApi; use sc_client_api::{AuxStore, BlockBackend, Finalizer, ProofProvider}; use sc_consensus::{ BlockImportParams, ForkChoiceStrategy, ImportResult, SharedBlockImport, StateAction, @@ -18,6 +17,7 @@ use sp_blockchain::{HashAndNumber, HeaderBackend, HeaderMetadata}; use sp_consensus::{BlockOrigin, SyncOracle}; use sp_core::traits::CodeExecutor; use sp_core::H256; +use sp_domains::core_api::DomainCoreApi; use sp_domains::merkle_tree::MerkleTree; use sp_domains::{BundleValidity, DomainId, DomainsApi, ExecutionReceipt, HeaderHashingFor}; use sp_domains_fraud_proof::fraud_proof::{FraudProof, ValidBundleProof}; @@ -333,7 +333,9 @@ where // } // } - let mut roots = self.client.runtime_api().intermediate_roots(header_hash)?; + let runtime_api = self.client.runtime_api(); + + let mut roots = runtime_api.intermediate_roots(header_hash)?; let encoded_state_root = state_root .encode() @@ -381,7 
+383,8 @@ where // Get the accumulated transaction fee of all transactions included in the block // and used as the operator reward - let block_fees = self.client.runtime_api().block_fees(header_hash)?; + let block_fees = runtime_api.block_fees(header_hash)?; + let transfers = runtime_api.transfers(header_hash)?; let execution_receipt = ExecutionReceipt { domain_block_number: header_number, @@ -396,8 +399,7 @@ where execution_trace: trace, execution_trace_root: sp_core::H256(trace_root), block_fees, - // TODO: Fetch transfers from the runtime - transfers: Default::default(), + transfers, }; Ok(DomainBlockResult { @@ -640,7 +642,10 @@ where // the extrinsic can be considered as invalid due to multiple `invalid_type` (i.e. an extrinsic // can be `OutOfRangeTx` and `InvalidXDM` at the same time) thus use the checking order and // consider the first check as the mismatch. - Ordering::Equal => match local_invalid_type.checking_order().cmp(&external_invalid_type.checking_order()) { + Ordering::Equal => match local_invalid_type + .checking_order() + .cmp(&external_invalid_type.checking_order()) + { Ordering::Less => BundleMismatchType::TrueInvalid(local_invalid_type), Ordering::Greater => BundleMismatchType::FalseInvalid(external_invalid_type), Ordering::Equal => unreachable!( @@ -926,6 +931,17 @@ where }); } + if bad_receipt.transfers != local_receipt.transfers { + return self + .fraud_proof_generator + .generate_invalid_transfers_proof(self.domain_id, &local_receipt, bad_receipt_hash) + .map_err(|err| { + sp_blockchain::Error::Application(Box::from(format!( + "Failed to generate invalid transfers fraud proof: {err}" + ))) + }); + } + if bad_receipt.domain_block_hash != local_receipt.domain_block_hash { return self .fraud_proof_generator @@ -995,11 +1011,11 @@ mod tests { find_inboxed_bundles_mismatch::( &create_test_execution_receipt(vec![InboxedBundle::invalid( InvalidBundleType::UndecodableTx(0), - Default::default() + Default::default(), )]), 
&create_test_execution_receipt(vec![InboxedBundle::invalid( InvalidBundleType::UndecodableTx(0), - Default::default() + Default::default(), )]), ) .unwrap(), @@ -1141,11 +1157,11 @@ mod tests { find_inboxed_bundles_mismatch::( &create_test_execution_receipt(vec![InboxedBundle::valid( H256::random(), - Default::default() + Default::default(), ),]), &create_test_execution_receipt(vec![InboxedBundle::invalid( InvalidBundleType::IllegalTx(3), - Default::default() + Default::default(), ),]), ) .unwrap(), @@ -1160,11 +1176,11 @@ mod tests { find_inboxed_bundles_mismatch::( &create_test_execution_receipt(vec![InboxedBundle::invalid( InvalidBundleType::IllegalTx(3), - Default::default() + Default::default(), ),]), &create_test_execution_receipt(vec![InboxedBundle::valid( H256::random(), - Default::default() + Default::default(), ),]), ) .unwrap(), diff --git a/domains/client/domain-operator/src/domain_bundle_producer.rs b/domains/client/domain-operator/src/domain_bundle_producer.rs index 81334b6f32..5014b64ec9 100644 --- a/domains/client/domain-operator/src/domain_bundle_producer.rs +++ b/domains/client/domain-operator/src/domain_bundle_producer.rs @@ -3,11 +3,11 @@ use crate::domain_bundle_proposer::DomainBundleProposer; use crate::utils::OperatorSlotInfo; use crate::BundleSender; use codec::Decode; -use domain_runtime_primitives::DomainCoreApi; use sc_client_api::{AuxStore, BlockBackend}; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; use sp_blockchain::HeaderBackend; +use sp_domains::core_api::DomainCoreApi; use sp_domains::{ Bundle, BundleProducerElectionApi, DomainId, DomainsApi, OperatorId, OperatorPublicKey, OperatorSignature, SealedBundleHeader, diff --git a/domains/client/domain-operator/src/domain_bundle_proposer.rs b/domains/client/domain-operator/src/domain_bundle_proposer.rs index ffd1f35925..927e23b0f4 100644 --- a/domains/client/domain-operator/src/domain_bundle_proposer.rs +++ 
b/domains/client/domain-operator/src/domain_bundle_proposer.rs @@ -1,12 +1,12 @@ use crate::ExecutionReceiptFor; use codec::Encode; -use domain_runtime_primitives::DomainCoreApi; use futures::{select, FutureExt}; use sc_client_api::{AuxStore, BlockBackend}; use sc_transaction_pool_api::InPoolTransaction; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_block_builder::BlockBuilder; use sp_blockchain::HeaderBackend; +use sp_domains::core_api::DomainCoreApi; use sp_domains::{ BundleHeader, DomainId, DomainsApi, ExecutionReceipt, HeaderHashingFor, ProofOfElection, }; diff --git a/domains/client/domain-operator/src/domain_worker.rs b/domains/client/domain-operator/src/domain_worker.rs index d18942c988..154483a4e1 100644 --- a/domains/client/domain-operator/src/domain_worker.rs +++ b/domains/client/domain-operator/src/domain_worker.rs @@ -18,7 +18,6 @@ use crate::bundle_processor::BundleProcessor; use crate::domain_bundle_producer::DomainBundleProducer; use crate::utils::{BlockInfo, OperatorSlotInfo}; use crate::{NewSlotNotification, OperatorStreams}; -use domain_runtime_primitives::DomainCoreApi; use futures::channel::mpsc; use futures::{SinkExt, Stream, StreamExt}; use sc_client_api::{ @@ -30,6 +29,7 @@ use sp_block_builder::BlockBuilder; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_core::traits::{CodeExecutor, SpawnEssentialNamed}; use sp_core::H256; +use sp_domains::core_api::DomainCoreApi; use sp_domains::{BundleProducerElectionApi, DomainsApi, OpaqueBundle, OperatorId}; use sp_domains_fraud_proof::FraudProofApi; use sp_messenger::MessengerApi; diff --git a/domains/client/domain-operator/src/fraud_proof.rs b/domains/client/domain-operator/src/fraud_proof.rs index d4294ce9b9..ed07f9c5b4 100644 --- a/domains/client/domain-operator/src/fraud_proof.rs +++ b/domains/client/domain-operator/src/fraud_proof.rs @@ -3,20 +3,22 @@ use crate::ExecutionReceiptFor; use codec::{Decode, Encode}; use domain_block_builder::{BlockBuilder, RecordProof}; use 
domain_runtime_primitives::opaque::AccountId; -use domain_runtime_primitives::{CheckExtrinsicsValidityError, DomainCoreApi}; +use domain_runtime_primitives::CheckExtrinsicsValidityError; use sc_client_api::{AuxStore, BlockBackend, ProofProvider}; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_core::traits::CodeExecutor; use sp_core::H256; use sp_domain_digests::AsPredigest; +use sp_domains::core_api::DomainCoreApi; use sp_domains::proof_provider_and_verifier::StorageProofProvider; use sp_domains::{DomainId, DomainsApi, ExtrinsicDigest, HeaderHashingFor, InvalidBundleType}; use sp_domains_fraud_proof::execution_prover::ExecutionProver; use sp_domains_fraud_proof::fraud_proof::{ ApplyExtrinsicMismatch, ExecutionPhase, FinalizeBlockMismatch, FraudProof, InvalidBlockFeesProof, InvalidBundlesFraudProof, InvalidDomainBlockHashProof, - InvalidExtrinsicsRootProof, InvalidStateTransitionProof, ValidBundleDigest, + InvalidExtrinsicsRootProof, InvalidStateTransitionProof, InvalidTransfersProof, + ValidBundleDigest, }; use sp_runtime::generic::BlockId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; @@ -57,7 +59,7 @@ pub enum FraudProofError { decoding_error: codec::Error, }, #[error( - "Invalid extrinsic index for creating illegal tx fraud proof, \ + "Invalid extrinsic index for creating illegal tx fraud proof, \ expected extrinsic index: {index},\ is_true_invalid: {is_true_invalid} and validity response is: {extrinsics_validity_response:?}" )] @@ -149,6 +151,25 @@ where })) } + pub(crate) fn generate_invalid_transfers_proof( + &self, + domain_id: DomainId, + local_receipt: &ExecutionReceiptFor, + bad_receipt_hash: Block::Hash, + ) -> Result, FraudProofError> { + let block_hash = local_receipt.domain_block_hash; + let runtime_api = self.client.runtime_api(); + let key = runtime_api.transfers_storage_key(block_hash)?; + let proof = self + .client + .read_proof(block_hash, &mut [key.as_slice()].into_iter())?; + 
Ok(FraudProof::InvalidTransfers(InvalidTransfersProof { + domain_id, + bad_receipt_hash, + storage_proof: proof, + })) + } + pub(crate) fn generate_invalid_domain_block_hash_proof( &self, domain_id: DomainId, @@ -203,7 +224,7 @@ where .to_string() .into(), ) - .into()) + .into()); } }; @@ -294,7 +315,7 @@ where StorageProofProvider::< LayoutV1>, >::generate_enumerated_proof_of_inclusion( - encoded_extrinsics.as_slice(), extrinsic_index + encoded_extrinsics.as_slice(), extrinsic_index, ) .ok_or(FraudProofError::FailToGenerateProofOfInclusion)? }; diff --git a/domains/client/domain-operator/src/operator.rs b/domains/client/domain-operator/src/operator.rs index da8c9521e3..9e8abfb5ff 100644 --- a/domains/client/domain-operator/src/operator.rs +++ b/domains/client/domain-operator/src/operator.rs @@ -4,7 +4,6 @@ use crate::domain_bundle_producer::DomainBundleProducer; use crate::domain_bundle_proposer::DomainBundleProposer; use crate::fraud_proof::FraudProofGenerator; use crate::{DomainImportNotifications, NewSlotNotification, OperatorParams}; -use domain_runtime_primitives::DomainCoreApi; use futures::channel::mpsc; use futures::{FutureExt, Stream}; use sc_client_api::{ @@ -15,6 +14,7 @@ use sp_api::ProvideRuntimeApi; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_core::traits::{CodeExecutor, SpawnEssentialNamed}; use sp_core::H256; +use sp_domains::core_api::DomainCoreApi; use sp_domains::{BundleProducerElectionApi, DomainsApi}; use sp_domains_fraud_proof::FraudProofApi; use sp_keystore::KeystorePtr; diff --git a/domains/client/domain-operator/src/tests.rs b/domains/client/domain-operator/src/tests.rs index 8699bcc0f0..a7371fc009 100644 --- a/domains/client/domain-operator/src/tests.rs +++ b/domains/client/domain-operator/src/tests.rs @@ -5,7 +5,7 @@ use crate::fraud_proof::{FraudProofGenerator, TraceDiffType}; use crate::tests::TxPoolError::InvalidTransaction as TxPoolInvalidTransaction; use crate::OperatorSlotInfo; use codec::{Decode, Encode}; -use 
domain_runtime_primitives::{DomainCoreApi, Hash}; +use domain_runtime_primitives::Hash; use domain_test_primitives::{OnchainStateApi, TimestampApi}; use domain_test_service::evm_domain_test_runtime::{Header, UncheckedExtrinsic}; use domain_test_service::EcdsaKeyring::{Alice, Bob, Charlie, Eve}; @@ -24,13 +24,16 @@ use sp_core::storage::StateVersion; use sp_core::traits::FetchRuntimeCode; use sp_core::{Pair, H256}; use sp_domain_digests::AsPredigest; +use sp_domains::core_api::DomainCoreApi; use sp_domains::merkle_tree::MerkleTree; use sp_domains::{ - Bundle, BundleValidity, DomainsApi, HeaderHashingFor, InboxedBundle, InvalidBundleType, + Bundle, BundleValidity, ChainId, DomainsApi, HeaderHashingFor, InboxedBundle, + InvalidBundleType, Transfers, }; use sp_domains_fraud_proof::fraud_proof::{ ApplyExtrinsicMismatch, ExecutionPhase, FinalizeBlockMismatch, FraudProof, InvalidBlockFeesProof, InvalidDomainBlockHashProof, InvalidExtrinsicsRootProof, + InvalidTransfersProof, }; use sp_domains_fraud_proof::InvalidTransactionCode; use sp_runtime::generic::{BlockId, DigestItem}; @@ -38,10 +41,11 @@ use sp_runtime::traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as use sp_runtime::transaction_validity::InvalidTransaction; use sp_runtime::OpaqueExtrinsic; use sp_state_machine::backend::AsTrieBackend; +use std::collections::BTreeMap; use std::sync::Arc; use subspace_core_primitives::PotOutput; use subspace_runtime_primitives::opaque::Block as CBlock; -use subspace_runtime_primitives::Balance; +use subspace_runtime_primitives::{Balance, SSC}; use subspace_test_service::{ produce_block_with, produce_blocks, produce_blocks_until, MockConsensusNode, }; @@ -144,8 +148,7 @@ async fn test_domain_chain_fork_choice() { let mut alice_import_notification_stream = alice.client.every_import_notification_stream(); // Fork B produce a consenus block that contains bundles - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - 
assert!(bundle.is_some()); + let (slot, _) = ferdie.produce_slot_and_wait_for_bundle_submission().await; let fork_b_block_hash = ferdie .produce_block_with_slot_at(slot, common_consensus_hash, None) .await @@ -162,8 +165,7 @@ async fn test_domain_chain_fork_choice() { assert_eq!(alice.client.info().best_hash, domain_hash_3); // Produce one more consensus block on fork B to make it the best fork - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert!(bundle.is_some()); + let (slot, _) = ferdie.produce_slot_and_wait_for_bundle_submission().await; let fork_b_block_hash = ferdie .produce_block_with_slot_at(slot, fork_b_block_hash, Some(vec![])) .await @@ -268,12 +270,9 @@ async fn test_domain_block_production() { // Simply producing more block on fork C for _ in 0..10 { - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let (slot, opaque_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; let tx = subspace_test_runtime::UncheckedExtrinsic::new_unsigned( - pallet_domains::Call::submit_bundle { - opaque_bundle: bundle.unwrap(), - } - .into(), + pallet_domains::Call::submit_bundle { opaque_bundle }.into(), ) .into(); // Produce consensus block that only contains the `submit_bundle` extrinsic instead of @@ -423,7 +422,7 @@ async fn test_domain_block_deriving_from_multiple_bundles() { // Produce a bundle and submit to the tx pool of the consensus node let (_, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert_eq!(bundle.unwrap().extrinsics.len(), 1); + assert_eq!(bundle.extrinsics.len(), 1); } let slot = ferdie.produce_slot(); @@ -616,7 +615,7 @@ async fn collected_receipts_should_be_on_the_same_branch_with_current_best_block let hash_2 = consensus_node.client.hash(2).unwrap().unwrap(); let header_2 = consensus_node.client.header(hash_2).unwrap().unwrap(); assert_eq!( - receipts_consensus_info(signed_bundle.unwrap()), + receipts_consensus_info(signed_bundle), 
consensus_block_info(header_2) ); } @@ -846,8 +845,7 @@ async fn test_bad_invalid_state_transition_proof_is_rejected() { .expect("Failed to send extrinsic"); // Produce a bundle that contains the previously sent extrinsic and record that bundle for later use - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let target_bundle = bundle.unwrap(); + let (slot, target_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; assert_eq!(target_bundle.extrinsics.len(), 1); produce_block_with!(ferdie.produce_block_with_slot(slot), alice) .await @@ -855,7 +853,7 @@ async fn test_bad_invalid_state_transition_proof_is_rejected() { // We get the receipt of target bundle let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let valid_receipt = bundle.unwrap().into_receipt(); + let valid_receipt = bundle.into_receipt(); assert_eq!(valid_receipt.execution_trace.len(), 5); let valid_receipt_hash = valid_receipt.hash::(); @@ -1108,29 +1106,22 @@ async fn test_invalid_state_transition_proof_creation_and_verification( .expect("Failed to send extrinsic"); // Produce a bundle that contains the previously sent extrinsic and record that bundle for later use - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let target_bundle = bundle.unwrap(); + let (slot, target_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; assert_eq!(target_bundle.extrinsics.len(), 1); produce_block_with!(ferdie.produce_block_with_slot(slot), alice) .await .unwrap(); // Get a bundle from the txn pool and modify the receipt of the target bundle to an invalid one - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let original_submit_bundle_tx = bundle_to_tx(bundle.clone().unwrap()); - let original_length = bundle - .as_ref() - .map(|opaque_bundle| { - opaque_bundle - .sealed_header - .header - .receipt - .execution_trace - .len() - }) - .expect("Bundle 
should exists; qed"); + let (slot, mut opaque_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let original_submit_bundle_tx = bundle_to_tx(opaque_bundle.clone()); + let original_length = opaque_bundle + .sealed_header + .header + .receipt + .execution_trace + .len(); let (bad_receipt_hash, bad_submit_bundle_tx) = { - let mut opaque_bundle = bundle.unwrap(); let receipt = &mut opaque_bundle.sealed_header.header.receipt; assert_eq!(receipt.execution_trace.len(), 5); @@ -1305,20 +1296,18 @@ async fn test_true_invalid_bundles_inherent_extrinsic_proof_creation_and_verific .expect("Failed to send extrinsic"); // Produce a bundle that contains the previously sent extrinsic and record that bundle for later use - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let target_bundle = bundle.unwrap(); + let (slot, target_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; assert_eq!(target_bundle.extrinsics.len(), 1); produce_block_with!(ferdie.produce_block_with_slot(slot), alice) .await .unwrap(); // Get a bundle from the txn pool and modify the receipt of the target bundle to an invalid one - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let original_submit_bundle_tx = bundle_to_tx(bundle.clone().unwrap()); + let (slot, mut opaque_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let original_submit_bundle_tx = bundle_to_tx(opaque_bundle.clone()); let extrinsics: Vec>; let bundle_extrinsic_root; let bad_submit_bundle_tx = { - let mut opaque_bundle = bundle.unwrap(); opaque_bundle.extrinsics.push(inherent_extrinsic()); extrinsics = opaque_bundle .extrinsics @@ -1353,11 +1342,10 @@ async fn test_true_invalid_bundles_inherent_extrinsic_proof_creation_and_verific .unwrap(); // produce another bundle that marks the previous extrinsic as invalid. 
- let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let original_submit_bundle_tx = bundle_to_tx(bundle.clone().unwrap()); + let (slot, mut opaque_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let original_submit_bundle_tx = bundle_to_tx(opaque_bundle.clone()); let (bad_receipt_hash, bad_submit_bundle_tx) = { - let mut opaque_bundle = bundle.unwrap(); let bad_receipt = &mut opaque_bundle.sealed_header.header.receipt; // bad receipt marks this particular bundle as valid even though bundle contains inherent extrinsic bad_receipt.inboxed_bundles = @@ -1455,8 +1443,7 @@ async fn test_false_invalid_bundles_inherent_extrinsic_proof_creation_and_verifi .expect("Failed to send extrinsic"); // Produce a bundle that contains the previously sent extrinsic and record that bundle for later use - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let target_bundle = bundle.unwrap(); + let (slot, target_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; assert_eq!(target_bundle.extrinsics.len(), 1); let extrinsics: Vec> = target_bundle .extrinsics @@ -1471,11 +1458,10 @@ async fn test_false_invalid_bundles_inherent_extrinsic_proof_creation_and_verifi .unwrap(); // produce another bundle that marks the previous valid extrinsic as invalid. 
- let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let original_submit_bundle_tx = bundle_to_tx(bundle.clone().unwrap()); + let (slot, mut opaque_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let original_submit_bundle_tx = bundle_to_tx(opaque_bundle.clone()); let (bad_receipt_hash, bad_submit_bundle_tx) = { - let mut opaque_bundle = bundle.unwrap(); let bad_receipt = &mut opaque_bundle.sealed_header.header.receipt; // bad receipt marks this particular bundle as invalid even though bundle does not contain // inherent extrinsic @@ -1566,16 +1552,15 @@ async fn test_true_invalid_bundles_illegal_extrinsic_proof_creation_and_verifica }; // Produce a bundle that contains the previously sent extrinsic and record that bundle for later use - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let target_bundle = bundle.unwrap(); + let (slot, target_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; assert_eq!(target_bundle.extrinsics.len(), 0); produce_block_with!(ferdie.produce_block_with_slot(slot), alice) .await .unwrap(); // Get a bundle from the txn pool and modify the receipt of the target bundle to an invalid one - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let original_submit_bundle_tx = bundle_to_tx(bundle.clone().unwrap()); + let (slot, mut opaque_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let original_submit_bundle_tx = bundle_to_tx(opaque_bundle.clone()); let alice_balance = alice.free_balance(Alice.to_account_id()); let mut alice_nonce = alice.account_nonce(); @@ -1612,7 +1597,6 @@ async fn test_true_invalid_bundles_illegal_extrinsic_proof_creation_and_verifica let extrinsics: Vec>; let bundle_extrinsic_root; let bad_submit_bundle_tx = { - let mut opaque_bundle = bundle.unwrap(); opaque_bundle.extrinsics = vec![ transfer_to_charlie_with_big_tip_1.into(), 
transfer_to_charlie_with_big_tip_2.into(), @@ -1651,11 +1635,10 @@ async fn test_true_invalid_bundles_illegal_extrinsic_proof_creation_and_verifica .unwrap(); // produce another bundle that marks the previous extrinsic as invalid. - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let original_submit_bundle_tx = bundle_to_tx(bundle.clone().unwrap()); + let (slot, mut opaque_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let original_submit_bundle_tx = bundle_to_tx(opaque_bundle.clone()); let (bad_receipt_hash, bad_submit_bundle_tx) = { - let mut opaque_bundle = bundle.unwrap(); let bad_receipt = &mut opaque_bundle.sealed_header.header.receipt; // bad receipt marks this particular bundle as valid even though bundle contains illegal extrinsic bad_receipt.inboxed_bundles = @@ -1780,8 +1763,7 @@ async fn test_false_invalid_bundles_illegal_extrinsic_proof_creation_and_verific .expect("Failed to send extrinsic"); // Produce a bundle that contains the previously sent extrinsic and record that bundle for later use - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let target_bundle = bundle.unwrap(); + let (slot, target_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; assert_eq!(target_bundle.extrinsics.len(), 2); let bundle_extrinsic_root = target_bundle.extrinsics_root(); produce_block_with!(ferdie.produce_block_with_slot(slot), alice) @@ -1789,11 +1771,10 @@ async fn test_false_invalid_bundles_illegal_extrinsic_proof_creation_and_verific .unwrap(); // produce another bundle that marks the previous valid extrinsic as invalid. 
- let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let original_submit_bundle_tx = bundle_to_tx(bundle.clone().unwrap()); + let (slot, mut opaque_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let original_submit_bundle_tx = bundle_to_tx(opaque_bundle.clone()); let (bad_receipt_hash, bad_submit_bundle_tx) = { - let mut opaque_bundle = bundle.unwrap(); let bad_receipt = &mut opaque_bundle.sealed_header.header.receipt; // bad receipt marks this particular bundle as invalid even though bundle does not contain // illegal tx @@ -1895,18 +1876,16 @@ async fn test_invalid_block_fees_proof_creation() { .expect("Failed to send extrinsic"); // Produce a bundle that contains the previously sent extrinsic and record that bundle for later use - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let target_bundle = bundle.unwrap(); + let (slot, target_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; assert_eq!(target_bundle.extrinsics.len(), 1); produce_block_with!(ferdie.produce_block_with_slot(slot), alice) .await .unwrap(); // Get a bundle from the txn pool and modify the receipt of the target bundle to an invalid one - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let original_submit_bundle_tx = bundle_to_tx(bundle.clone().unwrap()); + let (slot, mut opaque_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let original_submit_bundle_tx = bundle_to_tx(opaque_bundle.clone()); let (bad_receipt_hash, bad_submit_bundle_tx) = { - let mut opaque_bundle = bundle.unwrap(); let receipt = &mut opaque_bundle.sealed_header.header.receipt; receipt.block_fees = Default::default(); opaque_bundle.sealed_header.signature = Sr25519Keyring::Alice @@ -1956,6 +1935,114 @@ async fn test_invalid_block_fees_proof_creation() { assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); } +#[tokio::test(flavor = "multi_thread")] 
+async fn test_invalid_transfers_fraud_proof() { + let directory = TempDir::new().expect("Must be able to create temporary directory"); + + let mut builder = sc_cli::LoggerBuilder::new(""); + builder.with_colors(false); + let _ = builder.init(); + + let tokio_handle = tokio::runtime::Handle::current(); + + // Start Ferdie + let mut ferdie = MockConsensusNode::run( + tokio_handle.clone(), + Ferdie, + BasePath::new(directory.path().join("ferdie")), + ); + + // Run Alice (a evm domain authority node) + let mut alice = domain_test_service::DomainNodeBuilder::new( + tokio_handle.clone(), + Alice, + BasePath::new(directory.path().join("alice")), + ) + .build_evm_node(Role::Authority, GENESIS_DOMAIN_ID, &mut ferdie) + .await; + + let bundle_to_tx = |opaque_bundle| { + subspace_test_runtime::UncheckedExtrinsic::new_unsigned( + pallet_domains::Call::submit_bundle { opaque_bundle }.into(), + ) + .into() + }; + + produce_blocks!(ferdie, alice, 5).await.unwrap(); + + alice + .construct_and_send_extrinsic(pallet_balances::Call::transfer_allow_death { + dest: Bob.to_account_id(), + value: 1, + }) + .await + .expect("Failed to send extrinsic"); + + // Produce a bundle that contains the previously sent extrinsic and record that bundle for later use + let (slot, target_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + assert_eq!(target_bundle.extrinsics.len(), 1); + produce_block_with!(ferdie.produce_block_with_slot(slot), alice) + .await + .unwrap(); + + // Get a bundle from the txn pool and modify the receipt of the target bundle to an invalid one + let (slot, mut opaque_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let original_submit_bundle_tx = bundle_to_tx(opaque_bundle.clone()); + let (bad_receipt_hash, bad_submit_bundle_tx) = { + let receipt = &mut opaque_bundle.sealed_header.header.receipt; + receipt.transfers = Transfers { + transfers_in: BTreeMap::from([(ChainId::Consensus, 10 * SSC)]), + transfers_out: 
BTreeMap::from([(ChainId::Consensus, 10 * SSC)]), + rejected_transfers_claimed: Default::default(), + transfers_rejected: Default::default(), + }; + opaque_bundle.sealed_header.signature = Sr25519Keyring::Alice + .pair() + .sign(opaque_bundle.sealed_header.pre_hash().as_ref()) + .into(); + ( + opaque_bundle.receipt().hash::(), + bundle_to_tx(opaque_bundle), + ) + }; + + // Replace `original_submit_bundle_tx` with `bad_submit_bundle_tx` in the tx pool + ferdie + .prune_tx_from_pool(&original_submit_bundle_tx) + .await + .unwrap(); + assert!(ferdie.get_bundle_from_tx_pool(slot).is_none()); + + ferdie + .submit_transaction(bad_submit_bundle_tx) + .await + .unwrap(); + + // Wait for the fraud proof that target the bad ER + let wait_for_fraud_proof_fut = ferdie.wait_for_fraud_proof(move |fp| { + matches!( + fp, + FraudProof::InvalidTransfers(InvalidTransfersProof { .. }) + ) + }); + + // Produce a consensus block that contains the `bad_submit_bundle_tx` and the bad receipt should + // be added to the consensus chain block tree + produce_block_with!(ferdie.produce_block_with_slot(slot), alice) + .await + .unwrap(); + assert!(ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); + + // When the domain node operator process the primary block that contains the `bad_submit_bundle_tx`, + // it will generate and submit a fraud proof + let _ = wait_for_fraud_proof_fut.await; + + // Produce a consensus block that contains the fraud proof, the fraud proof wil be verified + // and executed, thus pruned the bad receipt from the block tree + ferdie.produce_blocks(1).await.unwrap(); + assert!(!ferdie.does_receipt_exist(bad_receipt_hash).unwrap()); +} + #[tokio::test(flavor = "multi_thread")] async fn test_invalid_domain_block_hash_proof_creation() { let directory = TempDir::new().expect("Must be able to create temporary directory"); @@ -2000,18 +2087,16 @@ async fn test_invalid_domain_block_hash_proof_creation() { .expect("Failed to send extrinsic"); // Produce a bundle that 
contains the previously sent extrinsic and record that bundle for later use - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let target_bundle = bundle.unwrap(); + let (slot, target_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; assert_eq!(target_bundle.extrinsics.len(), 1); produce_block_with!(ferdie.produce_block_with_slot(slot), alice) .await .unwrap(); // Get a bundle from the txn pool and modify the receipt of the target bundle to an invalid one - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let original_submit_bundle_tx = bundle_to_tx(bundle.clone().unwrap()); + let (slot, mut opaque_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let original_submit_bundle_tx = bundle_to_tx(opaque_bundle.clone()); let (bad_receipt_hash, bad_submit_bundle_tx) = { - let mut opaque_bundle = bundle.unwrap(); let receipt = &mut opaque_bundle.sealed_header.header.receipt; receipt.domain_block_hash = Default::default(); opaque_bundle.sealed_header.signature = Sr25519Keyring::Alice @@ -2105,18 +2190,16 @@ async fn test_invalid_domain_extrinsics_root_proof_creation() { .expect("Failed to send extrinsic"); // Produce a bundle that contains the previously sent extrinsic and record that bundle for later use - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let target_bundle = bundle.unwrap(); + let (slot, target_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; assert_eq!(target_bundle.extrinsics.len(), 1); produce_block_with!(ferdie.produce_block_with_slot(slot), alice) .await .unwrap(); // Get a bundle from the txn pool and modify the receipt of the target bundle to an invalid one - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let original_submit_bundle_tx = bundle_to_tx(bundle.clone().unwrap()); + let (slot, mut opaque_bundle) = 
ferdie.produce_slot_and_wait_for_bundle_submission().await; + let original_submit_bundle_tx = bundle_to_tx(opaque_bundle.clone()); let (bad_receipt_hash, bad_submit_bundle_tx) = { - let mut opaque_bundle = bundle.unwrap(); let receipt = &mut opaque_bundle.sealed_header.header.receipt; receipt.domain_block_extrinsic_root = Default::default(); opaque_bundle.sealed_header.signature = Sr25519Keyring::Alice @@ -2201,8 +2284,8 @@ async fn test_bundle_equivocation_fraud_proof() { .into() }; - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let original_submit_bundle_tx = bundle_to_tx(bundle.clone().unwrap()); + let (slot, mut opaque_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let original_submit_bundle_tx = bundle_to_tx(opaque_bundle.clone()); // Remove the original bundle submission and resubmit it again. // This is done since when the bundle is submitted through offchain transaction submission @@ -2227,7 +2310,6 @@ async fn test_bundle_equivocation_fraud_proof() { // change the bundle contents such that we derive a new bundle // with same slot and proof of election such that this leads to bundle equivocation. 
let equivocated_bundle_tx = { - let mut opaque_bundle = bundle.unwrap(); let receipt = &mut opaque_bundle.sealed_header.header.receipt; receipt.domain_block_extrinsic_root = Default::default(); opaque_bundle.sealed_header.signature = Sr25519Keyring::Alice @@ -2322,8 +2404,6 @@ async fn test_domain_block_builder_include_ext_with_failed_execution() { // Produce a bundle and submit to the tx pool of the consensus node let (_slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert!(bundle.is_some()); - let bundle = bundle.unwrap(); assert_eq!(bundle.extrinsics.len(), 2); // produce block and import domain block @@ -2339,8 +2419,6 @@ async fn test_domain_block_builder_include_ext_with_failed_execution() { // pre_timestamp_root + pre_consensus_chain_byte_fee_root + pre_success_ext_root + pre_failed_ext_root // + pre_finalize_block_root + post_finalize_block_root let (_slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert!(bundle.is_some()); - let bundle = bundle.unwrap(); let er = bundle.receipt(); assert_eq!(er.execution_trace.len(), 6); assert_eq!(er.execution_trace[5], er.final_state_root); @@ -2392,8 +2470,6 @@ async fn test_domain_block_builder_include_ext_with_failed_predispatch() { // Produce a bundle and submit to the tx pool of the consensus node let (_slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert!(bundle.is_some()); - let bundle = bundle.unwrap(); assert_eq!(bundle.extrinsics.len(), 1); // we produce another bundle with similar transaction @@ -2417,8 +2493,6 @@ async fn test_domain_block_builder_include_ext_with_failed_predispatch() { // Produce a bundle and submit to the tx pool of the consensus node let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert!(bundle.is_some()); - let bundle = bundle.unwrap(); assert_eq!(bundle.extrinsics.len(), 1); // produce block and import domain block @@ -2436,8 +2510,6 @@ async fn 
test_domain_block_builder_include_ext_with_failed_predispatch() { // pre_timestamp_root + pre_consensus_chain_byte_fee_root + pre_success_ext_root + pre_failed_ext_root // + pre_finalize_block_root + post_finalize_block_root let (_slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert!(bundle.is_some()); - let bundle = bundle.unwrap(); let er = bundle.sealed_header.header.receipt; assert_eq!(er.execution_trace.len(), 6); @@ -2452,7 +2524,7 @@ async fn test_domain_block_builder_include_ext_with_failed_predispatch() { .iter() .map(Encode::encode) .collect(), - sp_core::storage::StateVersion::V1 + sp_core::storage::StateVersion::V1, ) ); } @@ -2497,8 +2569,7 @@ async fn test_valid_bundle_proof_generation_and_verification() { .expect("Failed to send extrinsic"); // Produce a bundle and submit to the tx pool of the consensus node - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert!(bundle.is_some()); + let (slot, _) = ferdie.produce_slot_and_wait_for_bundle_submission().await; // In the last iteration, produce a consensus block which will included all the previous bundles if i == 2 { @@ -2525,11 +2596,10 @@ async fn test_valid_bundle_proof_generation_and_verification() { // Produce a bundle that will include the reciept of the last 3 bundles and modified the receipt's // `inboxed_bundles` field to make it invalid - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let original_submit_bundle_tx = bundle_to_tx(bundle.clone().unwrap()); + let (slot, mut bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let original_submit_bundle_tx = bundle_to_tx(bundle.clone()); let bundle_index = 1; let (bad_receipt, submit_bundle_tx_with_bad_receipt) = { - let mut bundle = bundle.unwrap(); assert_eq!(bundle.receipt().inboxed_bundles.len(), 3); bundle.sealed_header.header.receipt.inboxed_bundles[bundle_index].bundle = @@ -2728,8 +2798,7 @@ async fn 
pallet_domains_unsigned_extrinsics_should_work() { produce_blocks!(ferdie, alice, 1).await.unwrap(); // Get a bundle from alice's tx pool and used as bundle template. - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let _bundle_template = bundle.unwrap(); + let (slot, _) = ferdie.produce_slot_and_wait_for_bundle_submission().await; let _alice_key = alice.key; // Drop alice in order to control the execution chain by submitting the receipts manually later. drop(alice); @@ -2816,13 +2885,10 @@ async fn duplicated_bundle_should_be_rejected() { produce_blocks!(ferdie, alice, 1).await.unwrap(); - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let (slot, opaque_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; let submit_bundle_tx: OpaqueExtrinsic = subspace_test_runtime::UncheckedExtrinsic::new_unsigned( - pallet_domains::Call::submit_bundle { - opaque_bundle: bundle.unwrap(), - } - .into(), + pallet_domains::Call::submit_bundle { opaque_bundle }.into(), ) .into(); @@ -2908,7 +2974,7 @@ async fn stale_and_in_future_bundle_should_be_rejected() { // Produce one block that only included `bundle1` produce_block_with!( - ferdie.produce_block_with_extrinsics(vec![bundle_to_tx(bundle1.unwrap())]), + ferdie.produce_block_with_extrinsics(vec![bundle_to_tx(bundle1)]), alice ) .await @@ -2916,7 +2982,7 @@ async fn stale_and_in_future_bundle_should_be_rejected() { // `bundle2` will be rejected because its PoT is stale match ferdie - .submit_transaction(bundle_to_tx(bundle2.unwrap())) + .submit_transaction(bundle_to_tx(bundle2)) .await .unwrap_err() { @@ -3021,12 +3087,9 @@ async fn existing_bundle_can_be_resubmitted_to_new_fork() { let pre_alice_best_number = alice.client.info().best_number; let mut parent_hash = ferdie.client.info().best_hash; - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let (slot, opaque_bundle) = 
ferdie.produce_slot_and_wait_for_bundle_submission().await; let submit_bundle_tx = subspace_test_runtime::UncheckedExtrinsic::new_unsigned( - pallet_domains::Call::submit_bundle { - opaque_bundle: bundle.unwrap(), - } - .into(), + pallet_domains::Call::submit_bundle { opaque_bundle }.into(), ) .into(); @@ -3547,7 +3610,7 @@ async fn test_domain_transaction_fee_and_operator_reward() { // Produce a bundle that contains the just sent extrinsic let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert_eq!(bundle.unwrap().extrinsics.len(), 1); + assert_eq!(bundle.extrinsics.len(), 1); produce_block_with!(ferdie.produce_block_with_slot(slot), alice) .await .unwrap(); @@ -3555,7 +3618,7 @@ async fn test_domain_transaction_fee_and_operator_reward() { // Produce one more bundle, this bundle should contains the ER of the previous bundle let (_, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let receipt = bundle.unwrap().into_receipt(); + let receipt = bundle.into_receipt(); assert_eq!(receipt.consensus_block_hash, consensus_block_hash); // Transaction fee (including the tip) is deducted from alice's account @@ -3627,19 +3690,18 @@ async fn test_multiple_consensus_blocks_derive_similar_domain_block() { .expect("Failed to send extrinsic"); // Fork A with bundle that contains above transaction - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let (slot, mut opaque_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; let consensus_block_hash_fork_a = ferdie .produce_block_with_slot_at( slot, common_block_hash, - Some(vec![bundle_to_tx(bundle.clone().unwrap())]), + Some(vec![bundle_to_tx(opaque_bundle.clone())]), ) .await .unwrap(); // Fork B let bundle = { - let mut opaque_bundle = bundle.unwrap(); opaque_bundle.extrinsics = vec![]; opaque_bundle.sealed_header.header.bundle_extrinsics_root = sp_domains::EMPTY_EXTRINSIC_ROOT; @@ -3751,7 +3813,8 @@ async fn 
test_skip_empty_bundle_production() { let consensus_block_number = ferdie.client.info().best_number; let domain_block_number = alice.client.info().best_number; - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let slot = ferdie.produce_slot(); + let bundle = ferdie.notify_new_slot_and_wait_for_bundle(slot).await; ferdie.produce_block_with_slot(slot).await.unwrap(); // Alice will skip producing bundle since there is no domain extrinsic @@ -3761,11 +3824,10 @@ async fn test_skip_empty_bundle_production() { // Send a domain extrinsic, Alice will start producing bundle alice.send_system_remark().await; - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let (slot, _) = ferdie.produce_slot_and_wait_for_bundle_submission().await; produce_block_with!(ferdie.produce_block_with_slot(slot), alice) .await .unwrap(); - assert!(bundle.is_some()); assert_eq!(ferdie.client.info().best_number, consensus_block_number + 2); assert_eq!(alice.client.info().best_number, domain_block_number + 1); } @@ -3826,10 +3888,9 @@ async fn test_bad_receipt_chain() { produce_blocks!(ferdie, alice, 5).await.unwrap(); // Get a bundle from the txn pool and modify the receipt of the target bundle to an invalid one - let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - let original_submit_bundle_tx = bundle_to_tx(bundle.clone().unwrap()); + let (slot, mut opaque_bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; + let original_submit_bundle_tx = bundle_to_tx(opaque_bundle.clone()); let (bad_receipt_hash, bad_submit_bundle_tx) = { - let mut opaque_bundle = bundle.unwrap(); let receipt = &mut opaque_bundle.sealed_header.header.receipt; receipt.domain_block_hash = Default::default(); opaque_bundle.sealed_header.signature = Sr25519Keyring::Alice @@ -4017,13 +4078,13 @@ async fn test_skip_duplicated_tx_in_previous_bundle() { .await .expect("Failed to send extrinsic"); let (slot, bundle) = 
ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert_eq!(bundle.unwrap().extrinsics.len(), 1); + assert_eq!(bundle.extrinsics.len(), 1); // Produce a few more bundles, all of them will be empty since the only tx in the tx pool is already pick // up by the previous bundle for _ in 0..3 { let (_, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert!(bundle.unwrap().extrinsics.is_empty()); + assert!(bundle.extrinsics.is_empty()); } // Produce a domain that include all the bundles @@ -4038,7 +4099,7 @@ async fn test_skip_duplicated_tx_in_previous_bundle() { .await .expect("Failed to send extrinsic"); let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert_eq!(bundle.unwrap().extrinsics.len(), 1); + assert_eq!(bundle.extrinsics.len(), 1); ferdie .produce_block_with_slot_at(slot, ferdie.client.info().best_hash, Some(vec![])) .await @@ -4047,7 +4108,7 @@ async fn test_skip_duplicated_tx_in_previous_bundle() { // Even the tx is inclued in a previous bundle, after the consensus chain's tip changed, the operator // will resubmit the tx in the next bundle as retry let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert_eq!(bundle.unwrap().extrinsics.len(), 1); + assert_eq!(bundle.extrinsics.len(), 1); produce_block_with!(ferdie.produce_block_with_slot(slot), alice) .await @@ -4094,7 +4155,7 @@ async fn test_handle_duplicated_tx_with_diff_nonce_in_previous_bundle() { .await .expect("Failed to send extrinsic"); let (_, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert_eq!(bundle.unwrap().extrinsics.len(), 1); + assert_eq!(bundle.extrinsics.len(), 1); // Send a new tx with the same `nonce` and a tip then produce a bundle, this tx will replace // the previous tx in the tx pool and included in the bundle @@ -4103,7 +4164,7 @@ async fn test_handle_duplicated_tx_with_diff_nonce_in_previous_bundle() { .await .expect("Failed to send extrinsic"); 
let (_, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert_eq!(bundle.unwrap().extrinsics.len(), 1); + assert_eq!(bundle.extrinsics.len(), 1); // Send a tx with `nonce + 1` and produce a bundle, it won't include this tx because the tx // with `nonce` is included in previous bundle and is not submitted to the consensus chain yet @@ -4112,7 +4173,7 @@ async fn test_handle_duplicated_tx_with_diff_nonce_in_previous_bundle() { .await .expect("Failed to send extrinsic"); let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert!(bundle.unwrap().extrinsics.is_empty()); + assert!(bundle.extrinsics.is_empty()); // Produce a domain that include all the bundles produce_block_with!(ferdie.produce_block_with_slot(slot), alice) @@ -4127,7 +4188,7 @@ async fn test_handle_duplicated_tx_with_diff_nonce_in_previous_bundle() { .await .expect("Failed to send extrinsic"); let (slot, bundle) = ferdie.produce_slot_and_wait_for_bundle_submission().await; - assert_eq!(bundle.unwrap().extrinsics.len(), 2); + assert_eq!(bundle.extrinsics.len(), 2); produce_block_with!(ferdie.produce_block_with_slot(slot), alice) .await diff --git a/domains/pallets/block-fees/Cargo.toml b/domains/pallets/block-fees/Cargo.toml index 90372266db..4009c325e0 100644 --- a/domains/pallets/block-fees/Cargo.toml +++ b/domains/pallets/block-fees/Cargo.toml @@ -8,8 +8,8 @@ homepage = "https://subspace.network" repository = "https://github.com/subspace/subspace" description = "Subspace node pallet for charging and re-distributing domain transaction fees" include = [ - "/src", - "/Cargo.toml", + "/src", + "/Cargo.toml", ] [dependencies] @@ -20,18 +20,20 @@ frame-system = { version = "4.0.0-dev", default-features = false, git = "https:/ pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } scale-info = { version = "2.7.0", 
default-features = false, features = ["derive"] } sp-block-fees = { version = "0.1.0", path = "../../primitives/block-fees", default-features = false } +sp-domains = { version = "0.1.0", path = "../../../crates/sp-domains", default-features = false } sp-runtime = { version = "24.0.0", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } sp-std = { version = "8.0.0", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } [features] default = ["std"] std = [ - "codec/std", - "domain-runtime-primitives/std", - "frame-support/std", - "frame-system/std", - "scale-info/std", - "sp-block-fees/std", - "sp-runtime/std", - "sp-std/std", + "codec/std", + "domain-runtime-primitives/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", + "sp-block-fees/std", + "sp-domains/std", + "sp-runtime/std", + "sp-std/std", ] diff --git a/domains/pallets/block-fees/src/lib.rs b/domains/pallets/block-fees/src/lib.rs index b26a2c0ba4..624b03452a 100644 --- a/domains/pallets/block-fees/src/lib.rs +++ b/domains/pallets/block-fees/src/lib.rs @@ -24,11 +24,11 @@ pub use pallet::*; #[frame_support::pallet] mod pallet { use codec::{Codec, MaxEncodedLen}; - use domain_runtime_primitives::BlockFees; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; use scale_info::TypeInfo; use sp_block_fees::{InherentError, InherentType, INHERENT_IDENTIFIER}; + use sp_domains::BlockFees; use sp_runtime::traits::{AtLeast32BitUnsigned, MaybeSerializeDeserialize, Saturating}; use sp_runtime::{FixedPointOperand, SaturatedConversion}; use sp_std::fmt::Debug; @@ -98,10 +98,10 @@ mod pallet { impl Pallet { #[pallet::call_index(0)] #[pallet::weight(( - // TODO: proper weight - Weight::from_all(10_000), - DispatchClass::Mandatory - ))] + // TODO: proper weight + Weight::from_all(10_000), + DispatchClass::Mandatory + ))] pub fn 
set_next_consensus_chain_byte_fee( origin: OriginFor, #[pallet::compact] transaction_byte_fee: T::Balance, @@ -177,6 +177,14 @@ mod pallet { }); } + /// Note burned balance on domains + pub fn note_burned_balance(burned_balance: T::Balance) { + CollectedBlockFees::::mutate(|block_fees| { + block_fees.burned_balance = + block_fees.burned_balance.saturating_add(burned_balance); + }); + } + /// Return the final domain transaction byte fee, which consist of: /// - The `ConsensusChainByteFee` for the consensus chain storage cost since the domain /// transaction need to be bundled and submitted to the consensus chain first. diff --git a/domains/pallets/transporter/Cargo.toml b/domains/pallets/transporter/Cargo.toml index 88636b6bae..034ce1a800 100644 --- a/domains/pallets/transporter/Cargo.toml +++ b/domains/pallets/transporter/Cargo.toml @@ -8,19 +8,20 @@ homepage = "https://subspace.network" repository = "https://github.com/subspace/subspace" description = "Subspace node pallet to move funds between domains." 
include = [ - "/src", - "/Cargo.toml", - "/README.md", + "/src", + "/Cargo.toml", + "/README.md", ] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.5", default-features = false, features = ["derive"] } -domain-runtime-primitives = { path = "../../primitives/runtime" , default-features = false } +domain-runtime-primitives = { path = "../../primitives/runtime", default-features = false } frame-benchmarking = { version = "4.0.0-dev", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8", optional = true } frame-support = { version = "4.0.0-dev", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } frame-system = { version = "4.0.0-dev", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } scale-info = { version = "2.7.0", default-features = false, features = ["derive"] } sp-core = { version = "21.0.0", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } +sp-domains = { version = "0.1.0", default-features = false, path = "../../../crates/sp-domains" } sp-messenger = { version = "0.1.0", default-features = false, path = "../../primitives/messenger" } sp-runtime = { version = "24.0.0", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } sp-std = { version = "8.0.0", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } @@ -32,22 +33,23 @@ sp-io = { version = "23.0.0", git = "https://github.com/subspace/polkadot-sdk", [features] default = ["std"] std = [ - "codec/std", - "domain-runtime-primitives/std", - "frame-benchmarking?/std", - "frame-support/std", - "frame-system/std", - "scale-info/std", - "sp-core/std", - 
"sp-messenger/std", - "sp-runtime/std", - "sp-std/std", + "codec/std", + "domain-runtime-primitives/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", + "sp-core/std", + "sp-domains/std", + "sp-messenger/std", + "sp-runtime/std", + "sp-std/std", ] try-runtime = ["frame-support/try-runtime"] runtime-benchmarks = [ - "frame-benchmarking", - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "sp-messenger/runtime-benchmarks", + "frame-benchmarking", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-messenger/runtime-benchmarks", ] diff --git a/domains/pallets/transporter/src/lib.rs b/domains/pallets/transporter/src/lib.rs index da4b2836e6..dc94ac2bd3 100644 --- a/domains/pallets/transporter/src/lib.rs +++ b/domains/pallets/transporter/src/lib.rs @@ -21,10 +21,16 @@ use codec::{Decode, Encode}; use domain_runtime_primitives::{MultiAccountId, TryConvertBack}; +use frame_support::dispatch::DispatchResult; +use frame_support::ensure; use frame_support::traits::Currency; pub use pallet::*; use scale_info::TypeInfo; +use sp_domains::{DomainId, DomainsTransfersTracker, Transfers}; +use sp_messenger::endpoint::EndpointResponse; use sp_messenger::messages::ChainId; +use sp_runtime::traits::{CheckedAdd, CheckedSub, Get}; +use sp_std::vec; #[cfg(test)] mod mock; @@ -73,6 +79,7 @@ mod pallet { use frame_support::traits::{Currency, ExistenceRequirement, WithdrawReasons}; use frame_support::weights::Weight; use frame_system::pallet_prelude::*; + use sp_domains::{DomainId, DomainsTransfersTracker, Transfers}; use sp_messenger::endpoint::{ Endpoint, EndpointHandler as EndpointHandlerT, EndpointId, EndpointRequest, EndpointResponse, Sender, @@ -80,6 +87,7 @@ mod pallet { use sp_messenger::messages::ChainId; use sp_runtime::traits::Convert; use sp_std::vec; + use sp_std::vec::Vec; #[pallet::config] 
pub trait Config: frame_system::Config { @@ -89,7 +97,7 @@ mod pallet { /// Gets the chain_id of the current execution environment. type SelfChainId: Get; - /// Gets the endpoint_id of the this pallet in a given execution environment. + /// Gets the endpoint_id of this pallet in a given execution environment. type SelfEndpointId: Get; /// Currency used by this pallet. @@ -123,6 +131,31 @@ mod pallet { OptionQuery, >; + /// Domain balances. + #[pallet::storage] + #[pallet::getter(fn domain_balances)] + pub(super) type DomainBalances = + StorageMap<_, Identity, DomainId, BalanceOf, ValueQuery>; + + /// A temporary storage that tracks total transfers from this chain. + /// Clears on on_initialize for every block. + #[pallet::storage] + #[pallet::getter(fn chain_transfers)] + pub(super) type ChainTransfers = + StorageValue<_, Transfers>, ValueQuery>; + + /// Storage to track unconfirmed transfers between different chains. + #[pallet::storage] + #[pallet::getter(fn unconfirmed_transfers)] + pub(super) type UnconfirmedTransfers = + StorageDoubleMap<_, Identity, ChainId, Identity, ChainId, BalanceOf, ValueQuery>; + + /// Storage to track cancelled transfers between different chains. + #[pallet::storage] + #[pallet::getter(fn cancelled_transfers)] + pub(super) type CancelledTransfers = + StorageDoubleMap<_, Identity, ChainId, Identity, ChainId, BalanceOf, ValueQuery>; + /// Events emitted by pallet-transporter. #[pallet::event] #[pallet::generate_deposit(pub (super) fn deposit_event)] @@ -177,6 +210,16 @@ mod pallet { UnexpectedMessage, /// Emits when the account id type is invalid. InvalidAccountId, + /// Emits when from_chain do not have enough funds to finalize the transfer. 
+ LowBalanceOnDomain, + /// Emits when the transfer tracking was called from non-consensus chain + NonConsensusChain, + /// Emits when balance overflow + BalanceOverflow, + /// Emits when balance underflow + BalanceUnderflow, + /// Emits when domain balance is already initialized + DomainBalanceAlreadyInitialized, } #[pallet::call] @@ -229,10 +272,36 @@ mod pallet { chain_id: dst_chain_id, message_id, }); + + // if this is consensus chain, then note the transfer + // else add transfer to storage to send through ER to consensus chain + if T::SelfChainId::get().is_consensus_chain() { + Self::note_transfer(T::SelfChainId::get(), dst_chain_id, amount)? + } else { + ChainTransfers::::try_mutate(|transfers| { + Self::update_transfer_out(transfers, dst_chain_id, amount) + })?; + } + Ok(()) } } + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(_n: BlockNumberFor) -> Weight { + ChainTransfers::::set(Default::default()); + T::DbWeight::get().writes(1) + } + } + + impl Pallet { + pub fn transfers_storage_key() -> Vec { + use frame_support::storage::generator::StorageValue; + ChainTransfers::::storage_value_final_key().to_vec() + } + } + /// Endpoint handler implementation for pallet transporter. 
#[derive(Debug)] pub struct EndpointHandler(pub PhantomData); @@ -262,18 +331,21 @@ mod pallet { Err(_) => return Err(Error::::InvalidPayload.into()), }; - // mint the funds to dst_account - let account_id = T::AccountIdConverter::try_convert_back(req.receiver.account_id) - .ok_or(Error::::InvalidAccountId)?; + let amount = req.amount; + let response = Pallet::::finalize_transfer(src_chain_id, message_id, req); + if response.is_err() { + // if this is consensus chain, then reject the transfer + // else update the Transfers storage with rejected transfer + if T::SelfChainId::get().is_consensus_chain() { + Pallet::::reject_transfer(src_chain_id, T::SelfChainId::get(), amount)?; + } else { + ChainTransfers::::try_mutate(|transfers| { + Pallet::::update_transfer_rejected(transfers, src_chain_id, amount) + })?; + } + } - let _imbalance = T::Currency::deposit_creating(&account_id, req.amount); - frame_system::Pallet::::deposit_event(Into::<::RuntimeEvent>::into( - Event::::IncomingTransferSuccessful { - chain_id: src_chain_id, - message_id, - }, - )); - Ok(vec![]) + response } fn message_weight(&self) -> Weight { @@ -315,6 +387,25 @@ mod pallet { T::AccountIdConverter::try_convert_back(transfer.sender.account_id) .ok_or(Error::::InvalidAccountId)?; let _imbalance = T::Currency::deposit_creating(&account_id, transfer.amount); + + // if this is consensus chain, then revert the transfer + // else update the Transfers storage with reverted transfer + if T::SelfChainId::get().is_consensus_chain() { + Pallet::::claim_rejected_transfer( + T::SelfChainId::get(), + dst_chain_id, + transfer.amount, + )?; + } else { + ChainTransfers::::try_mutate(|transfers| { + Pallet::::update_transfer_revert( + transfers, + dst_chain_id, + transfer.amount, + ) + })?; + } + frame_system::Pallet::::deposit_event( Into::<::RuntimeEvent>::into( Event::::OutgoingTransferFailed { @@ -335,3 +426,243 @@ mod pallet { } } } + +impl sp_domains::DomainsTransfersTracker> for Pallet { + type Error = Error; + 
+ fn initialize_domain_balance( + domain_id: DomainId, + amount: BalanceOf, + ) -> Result<(), Self::Error> { + Self::ensure_consensus_chain()?; + + ensure!( + !DomainBalances::::contains_key(domain_id), + Error::DomainBalanceAlreadyInitialized + ); + + DomainBalances::::set(domain_id, amount); + Ok(()) + } + + fn note_transfer( + from_chain_id: ChainId, + to_chain_id: ChainId, + amount: BalanceOf, + ) -> Result<(), Self::Error> { + Self::ensure_consensus_chain()?; + + if let Some(domain_id) = from_chain_id.maybe_domain_chain() { + DomainBalances::::try_mutate(domain_id, |current_balance| { + *current_balance = current_balance + .checked_sub(&amount) + .ok_or(Error::LowBalanceOnDomain)?; + Ok(()) + })?; + } + + UnconfirmedTransfers::::try_mutate(from_chain_id, to_chain_id, |total_amount| { + *total_amount = total_amount + .checked_add(&amount) + .ok_or(Error::BalanceOverflow)?; + Ok(()) + })?; + + Ok(()) + } + + fn confirm_transfer( + from_chain_id: ChainId, + to_chain_id: ChainId, + amount: BalanceOf, + ) -> Result<(), Self::Error> { + Self::ensure_consensus_chain()?; + UnconfirmedTransfers::::try_mutate(from_chain_id, to_chain_id, |total_amount| { + *total_amount = total_amount + .checked_sub(&amount) + .ok_or(Error::BalanceUnderflow)?; + Ok(()) + })?; + + if let Some(domain_id) = to_chain_id.maybe_domain_chain() { + DomainBalances::::try_mutate(domain_id, |current_balance| { + *current_balance = current_balance + .checked_add(&amount) + .ok_or(Error::BalanceOverflow)?; + Ok(()) + })?; + } + + Ok(()) + } + + fn claim_rejected_transfer( + from_chain_id: ChainId, + to_chain_id: ChainId, + amount: BalanceOf, + ) -> Result<(), Self::Error> { + Self::ensure_consensus_chain()?; + CancelledTransfers::::try_mutate(from_chain_id, to_chain_id, |total_amount| { + *total_amount = total_amount + .checked_sub(&amount) + .ok_or(Error::BalanceUnderflow)?; + Ok(()) + })?; + + if let Some(domain_id) = from_chain_id.maybe_domain_chain() { + DomainBalances::::try_mutate(domain_id, 
|current_balance| { + *current_balance = current_balance + .checked_add(&amount) + .ok_or(Error::BalanceOverflow)?; + Ok(()) + })?; + } + Ok(()) + } + + fn reject_transfer( + from_chain_id: ChainId, + to_chain_id: ChainId, + amount: BalanceOf, + ) -> Result<(), Self::Error> { + Self::ensure_consensus_chain()?; + UnconfirmedTransfers::::try_mutate(from_chain_id, to_chain_id, |total_amount| { + *total_amount = total_amount + .checked_sub(&amount) + .ok_or(Error::BalanceUnderflow)?; + Ok(()) + })?; + + CancelledTransfers::::try_mutate(from_chain_id, to_chain_id, |total_amount| { + *total_amount = total_amount + .checked_add(&amount) + .ok_or(Error::BalanceOverflow)?; + Ok(()) + })?; + Ok(()) + } + + fn reduce_domain_balance(domain_id: DomainId, amount: BalanceOf) -> Result<(), Self::Error> { + DomainBalances::::try_mutate(domain_id, |current_balance| { + *current_balance = current_balance + .checked_sub(&amount) + .ok_or(Error::LowBalanceOnDomain)?; + Ok(()) + }) + } +} + +impl Pallet { + fn ensure_consensus_chain() -> Result<(), Error> { + ensure!( + T::SelfChainId::get().is_consensus_chain(), + Error::NonConsensusChain + ); + + Ok(()) + } + + fn finalize_transfer( + src_chain_id: ChainId, + message_id: MessageIdOf, + req: Transfer>, + ) -> EndpointResponse { + // mint the funds to dst_account + let account_id = T::AccountIdConverter::try_convert_back(req.receiver.account_id) + .ok_or(Error::::InvalidAccountId)?; + + let _imbalance = T::Currency::deposit_creating(&account_id, req.amount); + + // if this is consensus chain, then confirm the transfer + // else add transfer to storage to send through ER to consensus chain + if T::SelfChainId::get().is_consensus_chain() { + Pallet::::confirm_transfer(src_chain_id, T::SelfChainId::get(), req.amount)? 
+ } else { + ChainTransfers::::try_mutate(|transfers| { + Pallet::::update_transfer_in(transfers, src_chain_id, req.amount) + })?; + } + + frame_system::Pallet::::deposit_event(Into::<::RuntimeEvent>::into( + Event::::IncomingTransferSuccessful { + chain_id: src_chain_id, + message_id, + }, + )); + Ok(vec![]) + } + + fn update_transfer_out( + transfers: &mut Transfers>, + to_chain_id: ChainId, + amount: BalanceOf, + ) -> DispatchResult { + let total_transfer = + if let Some(current_transfer_amount) = transfers.transfers_out.get(&to_chain_id) { + current_transfer_amount + .checked_add(&amount) + .ok_or(Error::::BalanceOverflow)? + } else { + amount + }; + transfers.transfers_out.insert(to_chain_id, total_transfer); + Ok(()) + } + + fn update_transfer_in( + transfers: &mut Transfers>, + from_chain_id: ChainId, + amount: BalanceOf, + ) -> DispatchResult { + let total_transfer = + if let Some(current_transfer_amount) = transfers.transfers_in.get(&from_chain_id) { + current_transfer_amount + .checked_add(&amount) + .ok_or(Error::::BalanceOverflow)? + } else { + amount + }; + transfers.transfers_in.insert(from_chain_id, total_transfer); + Ok(()) + } + + fn update_transfer_revert( + transfers: &mut Transfers>, + to_chain_id: ChainId, + amount: BalanceOf, + ) -> DispatchResult { + let total_transfer = if let Some(current_transfer_amount) = + transfers.rejected_transfers_claimed.get(&to_chain_id) + { + current_transfer_amount + .checked_add(&amount) + .ok_or(Error::::BalanceOverflow)? + } else { + amount + }; + transfers + .rejected_transfers_claimed + .insert(to_chain_id, total_transfer); + Ok(()) + } + + fn update_transfer_rejected( + transfers: &mut Transfers>, + from_chain_id: ChainId, + amount: BalanceOf, + ) -> DispatchResult { + let total_transfer = if let Some(current_transfer_amount) = + transfers.transfers_rejected.get(&from_chain_id) + { + current_transfer_amount + .checked_add(&amount) + .ok_or(Error::::BalanceOverflow)? 
+ } else { + amount + }; + transfers + .transfers_rejected + .insert(from_chain_id, total_transfer); + Ok(()) + } +} diff --git a/domains/primitives/runtime/Cargo.toml b/domains/primitives/runtime/Cargo.toml index b5aca12904..ec815e8816 100644 --- a/domains/primitives/runtime/Cargo.toml +++ b/domains/primitives/runtime/Cargo.toml @@ -12,10 +12,12 @@ description = "Common primitives of subspace domain runtime" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +fp-account = { version = "1.0.0-dev", default-features = false, git = "https://github.com/subspace/frontier", rev = "7627e61d80275a4cf24d06f27491f6c31eadb7b7" } frame-support = { version = "4.0.0-dev", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } frame-system = { version = "4.0.0-dev", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } parity-scale-codec = { version = "3.6.9", default-features = false, features = ["derive"] } scale-info = { version = "2.7.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.195", default-features = false, features = ["alloc", "derive"] } sp-api = { version = "4.0.0-dev", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } sp-core = { version = "21.0.0", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } sp-runtime = { version = "24.0.0", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } @@ -27,10 +29,12 @@ sp-weights = { version = "20.0.0", default-features = false, git = "https://gith [features] default = ["std"] std = [ + "fp-account/std", "frame-support/std", "frame-system/std", "parity-scale-codec/std", "scale-info/std", + "serde/std", "sp-api/std", "sp-core/std", 
"sp-runtime/std", diff --git a/domains/primitives/runtime/src/lib.rs b/domains/primitives/runtime/src/lib.rs index 19b9d90cba..da5b016568 100644 --- a/domains/primitives/runtime/src/lib.rs +++ b/domains/primitives/runtime/src/lib.rs @@ -20,19 +20,20 @@ extern crate alloc; use alloc::string::String; +pub use fp_account::AccountId20; use frame_support::dispatch::{DispatchClass, PerDispatchClass}; use frame_support::weights::constants::{BlockExecutionWeight, ExtrinsicBaseWeight}; use frame_system::limits::{BlockLength, BlockWeights}; use parity_scale_codec::{Decode, Encode}; use scale_info::TypeInfo; -use sp_runtime::generic::{Era, UncheckedExtrinsic}; -use sp_runtime::traits::{Block as BlockT, Convert, IdentifyAccount, NumberFor, Verify}; +use serde::{Deserialize, Serialize}; +use sp_runtime::generic::UncheckedExtrinsic; +use sp_runtime::traits::{Convert, IdentifyAccount, Verify}; use sp_runtime::transaction_validity::TransactionValidityError; -use sp_runtime::{Digest, MultiAddress, MultiSignature, Perbill}; +use sp_runtime::{MultiAddress, MultiSignature, Perbill}; use sp_std::vec::Vec; use sp_weights::Weight; -use subspace_core_primitives::U256; -use subspace_runtime_primitives::{Moment, SHANNON}; +use subspace_runtime_primitives::SHANNON; /// Alias to 512-bit hash when used in the context of a transaction signature on the chain. pub type Signature = MultiSignature; @@ -131,7 +132,9 @@ where } /// MultiAccountId used by all the domains to describe their account type. -#[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] +#[derive( + Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo, Serialize, Deserialize, Ord, PartialOrd, +)] pub enum MultiAccountId { /// 32 byte account Id. AccountId32([u8; 32]), @@ -165,6 +168,24 @@ impl TryConvertBack for AccountIdConverter { } } +/// An AccountId20 to MultiAccount converter. 
+pub struct AccountId20Converter; + +impl Convert for AccountId20Converter { + fn convert(account_id: AccountId20) -> MultiAccountId { + MultiAccountId::AccountId20(account_id.into()) + } +} + +impl TryConvertBack for AccountId20Converter { + fn try_convert_back(multi_account_id: MultiAccountId) -> Option { + match multi_account_id { + MultiAccountId::AccountId20(acc) => Some(AccountId20::from(acc)), + _ => None, + } + } +} + #[derive(Debug, Decode, Encode, TypeInfo, PartialEq, Eq, Clone)] pub struct CheckExtrinsicsValidityError { pub extrinsic_index: u32, @@ -180,24 +201,6 @@ pub struct DecodeExtrinsicError(pub String); pub const CHECK_EXTRINSICS_AND_DO_PRE_DISPATCH_METHOD_NAME: &str = "DomainCoreApi_check_extrinsics_and_do_pre_dispatch"; -#[derive(Clone, Debug, Decode, Default, Encode, Eq, PartialEq, TypeInfo)] -pub struct BlockFees { - /// The consensus chain storage fee - pub consensus_storage_fee: Balance, - /// The domain execution fee including the storage and compute fee on domain chain, - /// tip, and the XDM reward. - pub domain_execution_fee: Balance, -} - -impl BlockFees { - pub fn new(domain_execution_fee: Balance, consensus_storage_fee: Balance) -> Self { - BlockFees { - consensus_storage_fee, - domain_execution_fee, - } - } -} - /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know /// the specifics of the runtime. They can then be made to be agnostic over specific formats /// of data like extrinsics, allowing for them to continue syncing the network through upgrades @@ -219,75 +222,10 @@ pub mod opaque { pub type AccountId = Vec; } -sp_api::decl_runtime_apis! { - /// Base API that every domain runtime must implement. - pub trait DomainCoreApi { - /// Extracts the optional signer per extrinsic. 
- fn extract_signer( - extrinsics: Vec<::Extrinsic>, - ) -> Vec<(Option, ::Extrinsic)>; - - fn is_within_tx_range( - extrinsic: &::Extrinsic, - bundle_vrf_hash: &U256, - tx_range: &U256, - ) -> bool; - - /// Returns the intermediate storage roots in an encoded form. - fn intermediate_roots() -> Vec<[u8; 32]>; - - /// Returns the storage root after initializing the block. - fn initialize_block_with_post_state_root(header: &::Header) -> Vec; - - /// Returns the storage root after applying the extrinsic. - fn apply_extrinsic_with_post_state_root(extrinsic: ::Extrinsic) -> Vec; - - /// Returns an encoded extrinsic aiming to upgrade the runtime using given code. - fn construct_set_code_extrinsic(code: Vec) -> Vec; - - /// Returns an encoded extrinsic to set timestamp. - fn construct_timestamp_extrinsic(moment: Moment) -> Block::Extrinsic; - - /// Returns an encoded extrinsic to set domain transaction byte fee. - fn construct_consensus_chain_byte_fee_extrinsic(consensus_chain_byte_fee: Balance) -> Block::Extrinsic; - - /// Returns true if the extrinsic is an inherent extrinsic. - fn is_inherent_extrinsic(extrinsic: &::Extrinsic) -> bool; - - /// Checks the validity of array of extrinsics + pre_dispatch - /// returning failure on first extrinsic that fails runtime call. - /// IMPORTANT: Change `CHECK_EXTRINSICS_AND_DO_PRE_DISPATCH_METHOD_NAME` constant when this method name is changed - fn check_extrinsics_and_do_pre_dispatch(uxts: Vec<::Extrinsic>, block_number: NumberFor, - block_hash: ::Hash) -> Result<(), CheckExtrinsicsValidityError>; - - /// Decodes the domain specific extrinsic from the opaque extrinsic. 
- fn decode_extrinsic( - opaque_extrinsic: sp_runtime::OpaqueExtrinsic, - ) -> Result<::Extrinsic, DecodeExtrinsicError>; - - /// Returns extrinsic Era if present - fn extrinsic_era( - extrinsic: &::Extrinsic - ) -> Option; - - /// Return the extrinsic weight - fn extrinsic_weight(ext: &Block::Extrinsic) -> Weight; - - /// The accumulated transaction fee of all transactions included in the block - fn block_fees() -> BlockFees; - - /// Return the block digest - fn block_digest() -> Digest; - - /// Return the consumed weight of the block - #[api_version(2)] - fn block_weight() -> Weight; - } -} - #[cfg(test)] mod test { use super::block_weights; + #[test] fn test_block_weights() { // validate and build block weights diff --git a/domains/runtime/evm/src/lib.rs b/domains/runtime/evm/src/lib.rs index abb47c0dd9..31546f2311 100644 --- a/domains/runtime/evm/src/lib.rs +++ b/domains/runtime/evm/src/lib.rs @@ -17,15 +17,16 @@ pub use domain_runtime_primitives::{ EXISTENTIAL_DEPOSIT, MAXIMUM_BLOCK_WEIGHT, }; use domain_runtime_primitives::{ - CheckExtrinsicsValidityError, DecodeExtrinsicError, MultiAccountId, TryConvertBack, - SLOT_DURATION, + CheckExtrinsicsValidityError, DecodeExtrinsicError, SLOT_DURATION, }; use fp_account::EthereumSignature; use fp_self_contained::{CheckedSignature, SelfContainedCall}; use frame_support::dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo}; use frame_support::inherent::ProvideInherent; +use frame_support::traits::fungible::Credit; use frame_support::traits::{ ConstU16, ConstU32, ConstU64, Currency, Everything, FindAuthor, Imbalance, OnFinalize, + OnUnbalanced, }; use frame_support::weights::constants::{ParityDbWeight, WEIGHT_REF_TIME_PER_SECOND}; use frame_support::weights::{ConstantMultiplier, IdentityFee, Weight}; @@ -42,16 +43,16 @@ use pallet_transporter::EndpointHandler; use sp_api::impl_runtime_apis; use sp_core::crypto::KeyTypeId; use sp_core::{Get, OpaqueMetadata, H160, H256, U256}; -use sp_domains::DomainId; +use 
sp_domains::{DomainId, Transfers}; use sp_messenger::endpoint::{Endpoint, EndpointHandler as EndpointHandlerT, EndpointId}; use sp_messenger::messages::{BlockMessagesWithStorageKey, ChainId, CrossDomainMessage, MessageId}; use sp_messenger_host_functions::{get_storage_key, StorageKeyRequest}; use sp_mmr_primitives::{EncodableOpaqueLeaf, Proof}; use sp_runtime::generic::Era; use sp_runtime::traits::{ - BlakeTwo256, Block as BlockT, Checkable, Convert, DispatchInfoOf, Dispatchable, - IdentifyAccount, IdentityLookup, Keccak256, One, PostDispatchInfoOf, SignedExtension, - UniqueSaturatedInto, ValidateUnsigned, Verify, Zero, + BlakeTwo256, Block as BlockT, Checkable, DispatchInfoOf, Dispatchable, IdentifyAccount, + IdentityLookup, Keccak256, One, PostDispatchInfoOf, SignedExtension, UniqueSaturatedInto, + ValidateUnsigned, Verify, Zero, }; use sp_runtime::transaction_validity::{ InvalidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, @@ -299,6 +300,15 @@ parameter_types! { pub const MaxReserves: u32 = 50; } +/// `DustRemovalHandler` used to collect all the SSC dust left when the account is reaped. +pub struct DustRemovalHandler; + +impl OnUnbalanced> for DustRemovalHandler { + fn on_nonzero_unbalanced(dusted_amount: Credit) { + BlockFees::note_burned_balance(dusted_amount.peek()); + } +} + impl pallet_balances::Config for Runtime { type RuntimeFreezeReason = RuntimeFreezeReason; type MaxLocks = MaxLocks; @@ -306,7 +316,7 @@ impl pallet_balances::Config for Runtime { type Balance = Balance; /// The ubiquitous event type. 
type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); + type DustRemoval = DustRemovalHandler; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = pallet_balances::weights::SubstrateWeight; @@ -331,6 +341,7 @@ impl pallet_block_fees::Config for Runtime { type NegativeImbalance = >::NegativeImbalance; pub struct FinalDomainTransactionByteFee; + impl Get for FinalDomainTransactionByteFee { fn get() -> Balance { BlockFees::final_domain_transaction_byte_fee() @@ -347,6 +358,7 @@ impl pallet_transaction_payment::Config for Runtime { } pub struct ExtrinsicStorageFees; + impl domain_pallet_executive::ExtrinsicStorageFees for ExtrinsicStorageFees { fn extract_signer(xt: UncheckedExtrinsic) -> (Option, DispatchInfo) { let dispatch_info = xt.get_dispatch_info(); @@ -400,6 +412,7 @@ impl sp_messenger::OnXDMRewards for OnXDMRewards { type MmrHash = ::Output; pub struct MmrProofVerifier; + impl sp_messenger::MmrProofVerifier for MmrProofVerifier { fn verify_proof_and_extract_consensus_state_root( opaque_leaf: EncodableOpaqueLeaf, @@ -414,6 +427,7 @@ impl sp_messenger::MmrProofVerifier for MmrProofVerifier { } pub struct StorageKeys; + impl sp_messenger::StorageKeys for StorageKeys { fn confirmed_domain_block_storage_key(domain_id: DomainId) -> Option> { get_storage_key(StorageKeyRequest::ConfirmedDomainBlockStorageKey(domain_id)) @@ -468,30 +482,13 @@ parameter_types! 
{ pub const TransporterEndpointId: EndpointId = 1; } -pub struct AccountId20Converter; - -impl Convert for AccountId20Converter { - fn convert(account_id: AccountId) -> MultiAccountId { - MultiAccountId::AccountId20(account_id.into()) - } -} - -impl TryConvertBack for AccountId20Converter { - fn try_convert_back(multi_account_id: MultiAccountId) -> Option { - match multi_account_id { - MultiAccountId::AccountId20(acc) => Some(AccountId::from(acc)), - _ => None, - } - } -} - impl pallet_transporter::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SelfChainId = SelfChainId; type SelfEndpointId = TransporterEndpointId; type Currency = Balances; type Sender = Messenger; - type AccountIdConverter = AccountId20Converter; + type AccountIdConverter = domain_runtime_primitives::AccountId20Converter; type WeightInfo = pallet_transporter::weights::SubstrateWeight; } @@ -912,8 +909,7 @@ impl_runtime_apis! { } } - #[api_version(2)] - impl domain_runtime_primitives::DomainCoreApi for Runtime { + impl sp_domains::core_api::DomainCoreApi for Runtime { fn extract_signer( extrinsics: Vec<::Extrinsic>, ) -> Vec<(Option, ::Extrinsic)> { @@ -1016,7 +1012,7 @@ impl_runtime_apis! { ext.get_dispatch_info().weight } - fn block_fees() -> domain_runtime_primitives::BlockFees { + fn block_fees() -> sp_domains::BlockFees { BlockFees::collected_block_fees() } @@ -1033,6 +1029,14 @@ impl_runtime_apis! 
{ pallet_block_fees::Call::set_next_consensus_chain_byte_fee{ transaction_byte_fee }.into() ) } + + fn transfers() -> Transfers { + Transporter::chain_transfers() + } + + fn transfers_storage_key() -> Vec { + Transporter::transfers_storage_key() + } } impl sp_messenger::MessengerApi for Runtime { diff --git a/domains/service/src/domain.rs b/domains/service/src/domain.rs index 7f66fc7fd8..42729ba18d 100644 --- a/domains/service/src/domain.rs +++ b/domains/service/src/domain.rs @@ -6,7 +6,7 @@ use domain_client_block_preprocessor::inherents::CreateInherentDataProvider; use domain_client_message_relayer::GossipMessageSink; use domain_client_operator::{Operator, OperatorParams, OperatorStreams}; use domain_runtime_primitives::opaque::{Block, Header}; -use domain_runtime_primitives::{Balance, DomainCoreApi, Hash}; +use domain_runtime_primitives::{Balance, Hash}; use futures::channel::mpsc; use futures::Stream; use pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi; @@ -31,6 +31,7 @@ use sp_consensus::SyncOracle; use sp_consensus_slots::Slot; use sp_core::traits::SpawnEssentialNamed; use sp_core::{Decode, Encode, H256}; +use sp_domains::core_api::DomainCoreApi; use sp_domains::{BundleProducerElectionApi, DomainId, DomainsApi, OperatorId}; use sp_domains_fraud_proof::FraudProofApi; use sp_messenger::messages::ChainId; diff --git a/domains/test/runtime/evm/src/lib.rs b/domains/test/runtime/evm/src/lib.rs index 13f303eff7..8c9a05f94d 100644 --- a/domains/test/runtime/evm/src/lib.rs +++ b/domains/test/runtime/evm/src/lib.rs @@ -13,8 +13,7 @@ extern crate alloc; use codec::{Decode, Encode}; pub use domain_runtime_primitives::opaque::Header; use domain_runtime_primitives::{ - block_weights, maximum_block_length, MultiAccountId, TryConvertBack, EXISTENTIAL_DEPOSIT, - MAXIMUM_BLOCK_WEIGHT, SLOT_DURATION, + block_weights, maximum_block_length, EXISTENTIAL_DEPOSIT, MAXIMUM_BLOCK_WEIGHT, SLOT_DURATION, }; pub use domain_runtime_primitives::{ opaque, Balance, BlockNumber, 
CheckExtrinsicsValidityError, DecodeExtrinsicError, Hash, Nonce, @@ -41,7 +40,7 @@ use pallet_transporter::EndpointHandler; use sp_api::impl_runtime_apis; use sp_core::crypto::KeyTypeId; use sp_core::{Get, OpaqueMetadata, H160, H256, U256}; -use sp_domains::DomainId; +use sp_domains::{DomainId, Transfers}; use sp_messenger::endpoint::{Endpoint, EndpointHandler as EndpointHandlerT, EndpointId}; use sp_messenger::messages::{ BlockMessagesWithStorageKey, ChainId, ChannelId, CrossDomainMessage, MessageId, @@ -50,9 +49,9 @@ use sp_messenger_host_functions::{get_storage_key, StorageKeyRequest}; use sp_mmr_primitives::{EncodableOpaqueLeaf, Proof}; use sp_runtime::generic::Era; use sp_runtime::traits::{ - BlakeTwo256, Block as BlockT, Checkable, Convert, DispatchInfoOf, Dispatchable, - IdentifyAccount, IdentityLookup, Keccak256, One, PostDispatchInfoOf, SignedExtension, - UniqueSaturatedInto, ValidateUnsigned, Verify, Zero, + BlakeTwo256, Block as BlockT, Checkable, DispatchInfoOf, Dispatchable, IdentifyAccount, + IdentityLookup, Keccak256, One, PostDispatchInfoOf, SignedExtension, UniqueSaturatedInto, + ValidateUnsigned, Verify, Zero, }; use sp_runtime::transaction_validity::{ InvalidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, @@ -330,6 +329,7 @@ impl pallet_block_fees::Config for Runtime { } pub struct FinalDomainTransactionByteFee; + impl Get for FinalDomainTransactionByteFee { fn get() -> Balance { BlockFees::final_domain_transaction_byte_fee() @@ -346,6 +346,7 @@ impl pallet_transaction_payment::Config for Runtime { } pub struct ExtrinsicStorageFees; + impl domain_pallet_executive::ExtrinsicStorageFees for ExtrinsicStorageFees { fn extract_signer(xt: UncheckedExtrinsic) -> (Option, DispatchInfo) { let dispatch_info = xt.get_dispatch_info(); @@ -400,6 +401,7 @@ impl sp_messenger::OnXDMRewards for OnXDMRewards { type MmrHash = ::Output; pub struct MmrProofVerifier; + impl sp_messenger::MmrProofVerifier for MmrProofVerifier { fn 
verify_proof_and_extract_consensus_state_root( opaque_leaf: EncodableOpaqueLeaf, @@ -414,6 +416,7 @@ impl sp_messenger::MmrProofVerifier for MmrProofVerifier { } pub struct StorageKeys; + impl sp_messenger::StorageKeys for StorageKeys { fn confirmed_domain_block_storage_key(domain_id: DomainId) -> Option> { get_storage_key(StorageKeyRequest::ConfirmedDomainBlockStorageKey(domain_id)) @@ -468,30 +471,13 @@ parameter_types! { pub const TransporterEndpointId: EndpointId = 1; } -pub struct AccountId20Converter; - -impl Convert for AccountId20Converter { - fn convert(account_id: AccountId) -> MultiAccountId { - MultiAccountId::AccountId20(account_id.into()) - } -} - -impl TryConvertBack for AccountId20Converter { - fn try_convert_back(multi_account_id: MultiAccountId) -> Option { - match multi_account_id { - MultiAccountId::AccountId20(acc) => Some(AccountId::from(acc)), - _ => None, - } - } -} - impl pallet_transporter::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SelfChainId = SelfChainId; type SelfEndpointId = TransporterEndpointId; type Currency = Balances; type Sender = Messenger; - type AccountIdConverter = AccountId20Converter; + type AccountIdConverter = domain_runtime_primitives::AccountId20Converter; type WeightInfo = pallet_transporter::weights::SubstrateWeight; } @@ -898,8 +884,7 @@ impl_runtime_apis! { } } - #[api_version(2)] - impl domain_runtime_primitives::DomainCoreApi for Runtime { + impl sp_domains::core_api::DomainCoreApi for Runtime { fn extract_signer( extrinsics: Vec<::Extrinsic>, ) -> Vec<(Option, ::Extrinsic)> { @@ -1002,7 +987,7 @@ impl_runtime_apis! { ext.get_dispatch_info().weight } - fn block_fees() -> domain_runtime_primitives::BlockFees { + fn block_fees() -> sp_domains::BlockFees { BlockFees::collected_block_fees() } @@ -1019,6 +1004,14 @@ impl_runtime_apis! 
{ pallet_block_fees::Call::set_next_consensus_chain_byte_fee{ transaction_byte_fee }.into() ) } + + fn transfers() -> Transfers { + Transporter::chain_transfers() + } + + fn transfers_storage_key() -> Vec { + Transporter::transfers_storage_key() + } } impl sp_messenger::MessengerApi for Runtime { diff --git a/domains/test/service/src/domain.rs b/domains/test/service/src/domain.rs index 4d37fc5fef..bb20baccb7 100644 --- a/domains/test/service/src/domain.rs +++ b/domains/test/service/src/domain.rs @@ -8,7 +8,7 @@ use crate::{ use cross_domain_message_gossip::ChainTxPoolMsg; use domain_client_operator::{fetch_domain_bootstrap_info, BootstrapResult, OperatorStreams}; use domain_runtime_primitives::opaque::Block; -use domain_runtime_primitives::{Balance, DomainCoreApi}; +use domain_runtime_primitives::Balance; use domain_service::providers::DefaultProvider; use domain_service::{FullClient, RuntimeExecutor}; use domain_test_primitives::OnchainStateApi; @@ -29,6 +29,7 @@ use serde::de::DeserializeOwned; use sp_api::{ApiExt, ConstructRuntimeApi, Metadata, ProvideRuntimeApi}; use sp_block_builder::BlockBuilder; use sp_core::{Decode, Encode, H256}; +use sp_domains::core_api::DomainCoreApi; use sp_domains::DomainId; use sp_messenger::messages::ChainId; use sp_messenger::{MessengerApi, RelayerApi}; diff --git a/test/subspace-test-client/Cargo.toml b/test/subspace-test-client/Cargo.toml index 25c7f38e8c..fd78ecbca7 100644 --- a/test/subspace-test-client/Cargo.toml +++ b/test/subspace-test-client/Cargo.toml @@ -16,6 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.5", features = ["derive"] } +domain-runtime-primitives = { version = "0.1.0", path = "../../domains/primitives/runtime" } evm-domain-test-runtime = { version = "0.1.0", path = "../../domains/test/runtime/evm" } fp-evm = { version = "3.0.0-dev", git = "https://github.com/subspace/frontier", rev = "7627e61d80275a4cf24d06f27491f6c31eadb7b7" } futures = 
"0.3.29" diff --git a/test/subspace-test-client/src/chain_spec.rs b/test/subspace-test-client/src/chain_spec.rs index 3f200e0d40..ecb0454af0 100644 --- a/test/subspace-test-client/src/chain_spec.rs +++ b/test/subspace-test-client/src/chain_spec.rs @@ -2,11 +2,12 @@ use crate::domain_chain_spec::testnet_evm_genesis; use codec::Encode; +use domain_runtime_primitives::AccountId20Converter; use sc_chain_spec::{ChainType, GenericChainSpec}; use sp_core::{sr25519, Pair, Public}; use sp_domains::storage::RawGenesis; use sp_domains::{GenesisDomain, OperatorAllowList, OperatorPublicKey, RuntimeType}; -use sp_runtime::traits::{IdentifyAccount, Verify}; +use sp_runtime::traits::{Convert, IdentifyAccount, Verify}; use sp_runtime::{BuildStorage, Percent}; use std::marker::PhantomData; use std::num::NonZeroU32; @@ -48,8 +49,15 @@ pub fn subspace_local_testnet_config() -> TestChainSpec { // Sudo account get_account_id_from_seed("Alice"), // Pre-funded accounts + // Alice also get more funds that are used during the domain instantiation vec![ - (get_account_id_from_seed("Alice"), 1_000 * SSC), + ( + get_account_id_from_seed("Alice"), + (5_000 + + crate::domain_chain_spec::endowed_accounts().len() as Balance + * 2_000_000) + * SSC, + ), (get_account_id_from_seed("Bob"), 1_000 * SSC), (get_account_id_from_seed("Charlie"), 1_000 * SSC), (get_account_id_from_seed("Dave"), 1_000 * SSC), @@ -138,6 +146,16 @@ fn create_genesis_config( signing_key: get_from_seed::("Alice"), minimum_nominator_stake: 100 * SSC, nomination_tax: Percent::from_percent(5), + initial_balances: crate::domain_chain_spec::endowed_accounts() + .iter() + .cloned() + .map(|k| { + ( + AccountId20Converter::convert(k), + 2_000_000 * subspace_runtime_primitives::SSC, + ) + }) + .collect(), }), }, } diff --git a/test/subspace-test-client/src/domain_chain_spec.rs b/test/subspace-test-client/src/domain_chain_spec.rs index f95b3f60a8..1fd60b07ea 100644 --- a/test/subspace-test-client/src/domain_chain_spec.rs +++ 
b/test/subspace-test-client/src/domain_chain_spec.rs @@ -6,7 +6,6 @@ use evm_domain_test_runtime::{ use sp_core::{ecdsa, Pair, Public}; use sp_domains::DomainId; use sp_runtime::traits::{IdentifyAccount, Verify}; -use subspace_runtime_primitives::SSC; type AccountPublic = ::Signer; @@ -23,7 +22,7 @@ where .into_account() } -fn endowed_accounts() -> Vec { +pub(crate) fn endowed_accounts() -> Vec { vec![ get_account_id_from_seed::("Alice"), get_account_id_from_seed::("Bob"), @@ -52,13 +51,7 @@ pub fn testnet_evm_genesis() -> RuntimeGenesisConfig { RuntimeGenesisConfig { system: evm_domain_test_runtime::SystemConfig::default(), - balances: evm_domain_test_runtime::BalancesConfig { - balances: endowed_accounts() - .iter() - .cloned() - .map(|k| (k, 2_000_000 * SSC)) - .collect(), - }, + balances: evm_domain_test_runtime::BalancesConfig::default(), sudo: evm_domain_test_runtime::SudoConfig { key: Some(alice) }, evm_chain_id: evm_domain_test_runtime::EVMChainIdConfig { chain_id: 100, diff --git a/test/subspace-test-client/src/lib.rs b/test/subspace-test-client/src/lib.rs index b6e32a7b7e..c9ed40e8b9 100644 --- a/test/subspace-test-client/src/lib.rs +++ b/test/subspace-test-client/src/lib.rs @@ -31,6 +31,7 @@ use sp_api::ProvideRuntimeApi; use sp_consensus_subspace::{FarmerPublicKey, FarmerSignature, SubspaceApi}; use sp_core::{Decode, Encode}; use std::num::{NonZeroU64, NonZeroUsize}; +use std::slice; use std::sync::Arc; use subspace_core_primitives::crypto::kzg::{embedded_kzg_settings, Kzg}; use subspace_core_primitives::objects::BlockObjectMapping; @@ -245,7 +246,8 @@ where sector_metadata_output: &mut sector_metadata, downloading_semaphore: None, encoding_semaphore: None, - table_generator: &mut table_generator, + table_generators: slice::from_mut(&mut table_generator), + abort_early: &Default::default(), }) .await .expect("Plotting one sector in memory must not fail"); diff --git a/test/subspace-test-runtime/src/lib.rs b/test/subspace-test-runtime/src/lib.rs index 
458c568ad6..cc9d1f24ae 100644 --- a/test/subspace-test-runtime/src/lib.rs +++ b/test/subspace-test-runtime/src/lib.rs @@ -28,7 +28,7 @@ use core::mem; use core::num::NonZeroU64; use domain_runtime_primitives::opaque::Header as DomainHeader; use domain_runtime_primitives::{ - BlockNumber as DomainNumber, Hash as DomainHash, MultiAccountId, TryConvertBack, + AccountIdConverter, BlockNumber as DomainNumber, Hash as DomainHash, }; use frame_support::inherent::ProvideInherent; use frame_support::traits::{ @@ -63,8 +63,8 @@ use sp_messenger::messages::{BlockMessagesWithStorageKey, ChainId, CrossDomainMe use sp_messenger_host_functions::{get_storage_key, StorageKeyRequest}; use sp_mmr_primitives::{EncodableOpaqueLeaf, Proof}; use sp_runtime::traits::{ - AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, ConstBool, Convert, - DispatchInfoOf, Keccak256, NumberFor, PostDispatchInfoOf, Zero, + AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, ConstBool, DispatchInfoOf, + Keccak256, NumberFor, PostDispatchInfoOf, Zero, }; use sp_runtime::transaction_validity::{ InvalidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, @@ -594,23 +594,6 @@ parameter_types! { pub const TransporterEndpointId: EndpointId = 1; } -pub struct AccountIdConverter; - -impl Convert for AccountIdConverter { - fn convert(account_id: AccountId) -> MultiAccountId { - MultiAccountId::AccountId32(account_id.into()) - } -} - -impl TryConvertBack for AccountIdConverter { - fn try_convert_back(multi_account_id: MultiAccountId) -> Option { - match multi_account_id { - MultiAccountId::AccountId32(acc) => Some(AccountId::from(acc)), - _ => None, - } - } -} - impl pallet_transporter::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SelfChainId = SelfChainId; @@ -648,6 +631,8 @@ parameter_types! 
{ pub const MaxNominators: u32 = 100; pub SudoId: AccountId = Sudo::key().expect("Sudo account must exist"); pub const DomainsPalletId: PalletId = PalletId(*b"domains_"); + pub const MaxInitialDomainAccounts: u32 = 20; + pub const MinInitialDomainAccountBalance: Balance = SSC; } // Minimum operator stake must be >= minimum nominator stake since operator is also a nominator. @@ -695,6 +680,9 @@ impl pallet_domains::Config for Runtime { type PalletId = DomainsPalletId; type StorageFee = TransactionFees; type BlockSlot = BlockSlot; + type DomainsTransfersTracker = Transporter; + type MaxInitialDomainAccounts = MaxInitialDomainAccounts; + type MinInitialDomainAccountBalance = MinInitialDomainAccountBalance; } parameter_types! { diff --git a/test/subspace-test-service/src/lib.rs b/test/subspace-test-service/src/lib.rs index f3cb5578d8..b54be2993c 100644 --- a/test/subspace-test-service/src/lib.rs +++ b/test/subspace-test-service/src/lib.rs @@ -91,6 +91,8 @@ use subspace_test_runtime::{RuntimeApi, RuntimeCall, UncheckedExtrinsic, SLOT_DU type FraudProofFor = FraudProof, ::Hash, ::Header>; +const MAX_PRODUCE_BUNDLE_TRY: usize = 10; + /// Create a Subspace `Configuration`. 
/// /// By default an in-memory socket will be used, therefore you need to provide boot @@ -525,13 +527,15 @@ impl MockConsensusNode { &mut self, ) -> ( NewSlot, - Option, Hash, DomainHeader, Balance>>, + OpaqueBundle, Hash, DomainHeader, Balance>, ) { - let slot = self.produce_slot(); - - let bundle = self.notify_new_slot_and_wait_for_bundle(slot).await; - - (slot, bundle) + for _ in 0..MAX_PRODUCE_BUNDLE_TRY { + let slot = self.produce_slot(); + if let Some(bundle) = self.notify_new_slot_and_wait_for_bundle(slot).await { + return (slot, bundle); + } + } + panic!("Failed to produce bundle after {MAX_PRODUCE_BUNDLE_TRY:?} tries, something must be wrong"); } /// Subscribe the new slot notification @@ -874,7 +878,7 @@ impl MockConsensusNode { &mut self, extrinsics: Vec<::Extrinsic>, ) -> Result<(), Box> { - let (slot, _) = self.produce_slot_and_wait_for_bundle_submission().await; + let slot = self.produce_slot(); self.produce_block_with_slot_at(slot, self.client.info().best_hash, Some(extrinsics)) .await?; Ok(()) @@ -883,6 +887,16 @@ impl MockConsensusNode { /// Produce `n` number of blocks. #[sc_tracing::logging::prefix_logs_with(self.log_prefix)] pub async fn produce_blocks(&mut self, n: u64) -> Result<(), Box> { + for _ in 0..n { + let slot = self.produce_slot(); + self.produce_block_with_slot(slot).await?; + } + Ok(()) + } + + /// Produce `n` number of blocks and wait for bundle submitted to the block. + #[sc_tracing::logging::prefix_logs_with(self.log_prefix)] + pub async fn produce_blocks_with_bundles(&mut self, n: u64) -> Result<(), Box> { for _ in 0..n { let (slot, _) = self.produce_slot_and_wait_for_bundle_submission().await; self.produce_block_with_slot(slot).await?; @@ -1043,7 +1057,7 @@ macro_rules! 
produce_blocks { $( futs.push( Box::pin( $domain_node.wait_for_blocks($count) ) ); )* futures::future::join_all(futs) }; - $primary_node.produce_blocks($count).await?; + $primary_node.produce_blocks_with_bundles($count).await?; domain_fut.await; Ok::<(), Box>(()) }